Merge tag 'v4.4.13' into linux-linaro-lsk-v4.4
author Alex Shi <alex.shi@linaro.org>
Tue, 14 Jun 2016 09:07:59 +0000 (17:07 +0800)
committer Alex Shi <alex.shi@linaro.org>
Tue, 14 Jun 2016 09:07:59 +0000 (17:07 +0800)
 This is the 4.4.13 stable release

211 files changed:
Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10
Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
Documentation/ABI/testing/sysfs-bus-coresight-devices-stm [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
Documentation/ABI/testing/sysfs-class-stm
Documentation/arm64/booting.txt
Documentation/arm64/silicon-errata.txt [new file with mode: 0644]
Documentation/devicetree/bindings/opp/opp.txt
Documentation/features/time/irq-time-acct/arch-support.txt
Documentation/features/vm/huge-vmap/arch-support.txt
Documentation/kernel-parameters.txt
Documentation/trace/coresight.txt
MAINTAINERS
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/kvm_asm.h
arch/arm/kvm/arm.c
arch/arm/vdso/vdso.S
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/boot.h
arch/arm64/include/asm/brk-imm.h [new file with mode: 0644]
arch/arm64/include/asm/bug.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/debug-monitors.h
arch/arm64/include/asm/elf.h
arch/arm64/include/asm/fixmap.h
arch/arm64/include/asm/ftrace.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/hardirq.h
arch/arm64/include/asm/hugetlb.h
arch/arm64/include/asm/irq.h
arch/arm64/include/asm/kasan.h
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/lse.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/shmparam.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/spinlock.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/uaccess.h
arch/arm64/include/asm/word-at-a-time.h
arch/arm64/include/uapi/asm/ptrace.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/acpi_parking_protocol.c [new file with mode: 0644]
arch/arm64/kernel/alternative.c
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/head.S
arch/arm64/kernel/image.h
arch/arm64/kernel/irq.c
arch/arm64/kernel/kaslr.c [new file with mode: 0644]
arch/arm64/kernel/module-plts.c [new file with mode: 0644]
arch/arm64/kernel/module.c
arch/arm64/kernel/module.lds [new file with mode: 0644]
arch/arm64/kernel/perf_callchain.c
arch/arm64/kernel/process.c
arch/arm64/kernel/return_address.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/sleep.S
arch/arm64/kernel/smp.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/time.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso/vdso.S
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/hyp.S
arch/arm64/lib/Makefile
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_page.S
arch/arm64/lib/copy_to_user.S
arch/arm64/mm/cache.S
arch/arm64/mm/context.c
arch/arm64/mm/copypage.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/dump.c
arch/arm64/mm/extable.c
arch/arm64/mm/fault.c
arch/arm64/mm/flush.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/pageattr.c
arch/arm64/mm/pgd.c
arch/arm64/mm/proc-macros.S
arch/arm64/mm/proc.S
arch/parisc/Kconfig
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/cache.h
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/uaccess.h
arch/parisc/mm/fault.c
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/entry/vdso/vdso2c.h
arch/x86/include/asm/cacheflush.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/kvm_para.h
arch/x86/include/asm/sections.h
arch/x86/kernel/ftrace.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/test_nx.c
arch/x86/kernel/test_rodata.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/pageattr.c
drivers/base/power/opp/Makefile
drivers/base/power/opp/core.c
drivers/base/power/opp/cpu.c
drivers/base/power/opp/debugfs.c [new file with mode: 0644]
drivers/base/power/opp/opp.h
drivers/cpufreq/cpufreq-dt.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/arm64-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/libstub/random.c [new file with mode: 0644]
drivers/hwtracing/coresight/Kconfig
drivers/hwtracing/coresight/Makefile
drivers/hwtracing/coresight/coresight-etb10.c
drivers/hwtracing/coresight/coresight-etm-perf.c [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-etm-perf.h [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-etm.h
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-etm3x.c
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/hwtracing/coresight/coresight-etm4x.h
drivers/hwtracing/coresight/coresight-funnel.c
drivers/hwtracing/coresight/coresight-priv.h
drivers/hwtracing/coresight/coresight-replicator-qcom.c
drivers/hwtracing/coresight/coresight-replicator.c
drivers/hwtracing/coresight/coresight-stm.c [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-tmc-etf.c [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-tmc-etr.c [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-tmc.c
drivers/hwtracing/coresight/coresight-tmc.h [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-tpiu.c
drivers/hwtracing/coresight/coresight.c
drivers/hwtracing/coresight/of_coresight.c
drivers/hwtracing/stm/Kconfig
drivers/hwtracing/stm/Makefile
drivers/hwtracing/stm/core.c
drivers/hwtracing/stm/dummy_stm.c
drivers/hwtracing/stm/heartbeat.c [new file with mode: 0644]
drivers/hwtracing/stm/policy.c
drivers/hwtracing/stm/stm.h
drivers/misc/lkdtm.c
drivers/of/fdt.c
include/asm-generic/fixmap.h
include/asm-generic/vmlinux.lds.h
include/linux/amba/bus.h
include/linux/cache.h
include/linux/coresight-pmu.h [new file with mode: 0644]
include/linux/coresight-stm.h [new file with mode: 0644]
include/linux/coresight.h
include/linux/efi.h
include/linux/hugetlb.h
include/linux/init.h
include/linux/pm_opp.h
include/linux/stm.h
include/uapi/linux/coresight-stm.h [new file with mode: 0644]
init/main.c
kernel/debug/kdb/kdb_bp.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
lib/extable.c
scripts/sortextable.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/builtin-inject.c
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/session.c

index 4b8d6ec92e2b91560123144c45e0afb19464287c..b5f526081711878eb46f0586415f043b929aaba0 100644 (file)
@@ -6,13 +6,6 @@ Description:   (RW) Add/remove a sink from a trace path.  There can be multiple
                source for a single sink.
                ex: echo 1 > /sys/bus/coresight/devices/20010000.etb/enable_sink
 
-What:          /sys/bus/coresight/devices/<memory_map>.etb/status
-Date:          November 2014
-KernelVersion: 3.19
-Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
-Description:   (R) List various control and status registers.  The specific
-               layout and content is driver specific.
-
 What:          /sys/bus/coresight/devices/<memory_map>.etb/trigger_cntr
 Date:          November 2014
 KernelVersion: 3.19
@@ -22,3 +15,65 @@ Description: (RW) Disables write access to the Trace RAM by stopping the
                following the trigger event. The number of 32-bit words written
                into the Trace RAM following the trigger event is equal to the
                value stored in this register+1 (from ARM ETB-TRM).
+
+What:          /sys/bus/coresight/devices/<memory_map>.etb/mgmt/rdp
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Defines the depth, in words, of the trace RAM in powers of
+               2.  The value is read directly from HW register RDP, 0x004.
+
+What:          /sys/bus/coresight/devices/<memory_map>.etb/mgmt/sts
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the ETB status register.  The value
+               is read directly from HW register STS, 0x00C.
+
+What:          /sys/bus/coresight/devices/<memory_map>.etb/mgmt/rrp
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the ETB RAM Read Pointer register
+               that is used to read entries from the Trace RAM over the APB
+               interface.  The value is read directly from HW register RRP,
+               0x014.
+
+What:          /sys/bus/coresight/devices/<memory_map>.etb/mgmt/rwp
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the ETB RAM Write Pointer register
+               that is used to set the write pointer for writing entries from
+               the CoreSight bus into the Trace RAM. The value is read directly
+               from HW register RWP, 0x018.
+
+What:          /sys/bus/coresight/devices/<memory_map>.etb/mgmt/trg
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Similar to "trigger_cntr" above except that this value is
+               read directly from HW register TRG, 0x01C.
+
+What:          /sys/bus/coresight/devices/<memory_map>.etb/mgmt/ctl
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the ETB Control register. The value
+               is read directly from HW register CTL, 0x020.
+
+What:          /sys/bus/coresight/devices/<memory_map>.etb/mgmt/ffsr
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the ETB Formatter and Flush Status
+               register.  The value is read directly from HW register FFSR,
+               0x300.
+
+What:          /sys/bus/coresight/devices/<memory_map>.etb/mgmt/ffcr
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the ETB Formatter and Flush Control
+               register.  The value is read directly from HW register FFCR,
+               0x304.
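
The mgmt entries above are plain read-only sysfs attributes, so they can be
dumped with ordinary shell tools.  A minimal sketch, assuming an ETB mapped at
20010000.etb (the address, and the values printed, are platform-specific):

  for reg in rdp sts rrp rwp trg ctl ffsr ffcr; do
          printf '%s: ' "$reg"
          cat /sys/bus/coresight/devices/20010000.etb/mgmt/"$reg"
  done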
index 2355ed8ae31f732d1567f2b9085d86574f817523..36258bc1b473a9c826d272bd3328f7c79562e49a 100644 (file)
@@ -359,6 +359,19 @@ Contact:   Mathieu Poirier <mathieu.poirier@linaro.org>
 Description:   (R) Print the content of the Peripheral ID3 Register
                (0xFEC).  The value is taken directly from the HW.
 
+What:          /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcconfig
+Date:          February 2016
+KernelVersion: 4.07
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Print the content of the trace configuration register
+               (0x010) as currently set by SW.
+
+What:          /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trctraceid
+Date:          February 2016
+KernelVersion: 4.07
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Print the content of the trace ID register (0x040).
+
 What:          /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr0
 Date:          April 2015
 KernelVersion: 4.01
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-stm b/Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
new file mode 100644 (file)
index 0000000..1dffabe
--- /dev/null
@@ -0,0 +1,53 @@
+What:          /sys/bus/coresight/devices/<memory_map>.stm/enable_source
+Date:          April 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (RW) Enable/disable tracing on this specific trace macrocell.
+               Enabling the trace macrocell implies it has been configured
+               properly and a sink has been identified for it.  The path
+               of coresight components linking the source to the sink is
+               configured and managed automatically by the coresight framework.
+
+What:          /sys/bus/coresight/devices/<memory_map>.stm/hwevent_enable
+Date:          April 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (RW) Provides access to the HW event enable register, used in
+               conjunction with the HW event bank select register.
+
+What:          /sys/bus/coresight/devices/<memory_map>.stm/hwevent_select
+Date:          April 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (RW) Gives access to the HW event block select register
+               (STMHEBSR) in order to configure up to 256 channels.  Used in
+               conjunction with the "hwevent_enable" register described above.
+
+What:          /sys/bus/coresight/devices/<memory_map>.stm/port_enable
+Date:          April 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (RW) Provides access to the stimulus port enable register
+               (STMSPER).  Used in conjunction with "port_select" described
+               below.
+
+What:          /sys/bus/coresight/devices/<memory_map>.stm/port_select
+Date:          April 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (RW) Used to determine which bank of stimulus ports the bits in
+               register STMSPER (see above) apply to.
+
+What:          /sys/bus/coresight/devices/<memory_map>.stm/status
+Date:          April 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) List various control and status registers.  The specific
+               layout and content is driver specific.
+
+What:          /sys/bus/coresight/devices/<memory_map>.stm/traceid
+Date:          April 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (RW) Holds the trace ID that will appear in the trace stream
+               coming from this trace entity.
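
Note that "port_select" and "port_enable" work as a pair: the former picks
which bank of stimulus ports the STMSPER mask applies to, while the latter
holds the mask itself.  A hedged usage sketch (the STM address is
platform-specific, and the input format accepted is driver-specific):

  # Select the first bank of stimulus ports ...
  echo 0 > /sys/bus/coresight/devices/20100000.stm/port_select
  # ... then enable all 32 ports in that bank.
  echo ffffffff > /sys/bus/coresight/devices/20100000.stm/port_enable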
index f38cded5fa22cfefa4bd91521d36ebef89b581e7..4fe677ed1305c8ecaf4a4f0a68af1745a8d0ce02 100644 (file)
@@ -6,3 +6,80 @@ Description:   (RW) Disables write access to the Trace RAM by stopping the
                formatter after a defined number of words have been stored
                following the trigger event. Additional interfaces for this
                driver are expected to be added as it matures.
+
+What:           /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/rsz
+Date:           March 2016
+KernelVersion:  4.7
+Contact:        Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:    (R) Defines the size, in 32-bit words, of the local RAM buffer.
+                The value is read directly from HW register RSZ, 0x004.
+
+What:           /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/sts
+Date:           March 2016
+KernelVersion:  4.7
+Contact:        Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the TMC status register.  The value
+                is read directly from HW register STS, 0x00C.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/rrp
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the TMC RAM Read Pointer register
+               that is used to read entries from the Trace RAM over the APB
+               interface.  The value is read directly from HW register RRP,
+               0x014.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/rwp
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the TMC RAM Write Pointer register
+               that is used to set the write pointer for writing entries from
+               the CoreSight bus into the Trace RAM. The value is read directly
+               from HW register RWP, 0x018.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/trg
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Similar to "trigger_cntr" above except that this value is
+               read directly from HW register TRG, 0x01C.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/ctl
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the TMC Control register. The value
+               is read directly from HW register CTL, 0x020.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/ffsr
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the TMC Formatter and Flush Status
+               register.  The value is read directly from HW register FFSR,
+               0x300.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/ffcr
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the TMC Formatter and Flush Control
+               register.  The value is read directly from HW register FFCR,
+               0x304.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/mode
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Shows the value held by the TMC Mode register, which
+               indicates the mode the device has been configured to enact.  The
+               value is read directly from the MODE register, 0x028.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/devid
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:   (R) Indicates the capabilities of the Coresight TMC.
+               The value is read directly from the DEVID register, 0xFC8.
index c9aa4f3fc9a71f7429210346a61a727753ed724e..77ed3da0f68e437f9c82ccfebcdc79d761831833 100644 (file)
@@ -12,3 +12,13 @@ KernelVersion:       4.3
 Contact:       Alexander Shishkin <alexander.shishkin@linux.intel.com>
 Description:
                Shows the number of channels per master on this STM device.
+
+What:          /sys/class/stm/<stm>/hw_override
+Date:          March 2016
+KernelVersion: 4.7
+Contact:       Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:
+               Reads as 0 if the master numbers in the STP stream produced by
+               this stm device match the master numbers assigned by software,
+               or 1 if the stm hardware overrides the software-assigned
+               masters.
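
Since hw_override is informational, checking it is a single read; a sketch,
using a hypothetical device name (any stm class device under /sys/class/stm
works):

  cat /sys/class/stm/dummy_stm/hw_override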
index 701d39d3171a74d8c2eb670c0b1be2931f326ee8..56d6d8b796db6dd3aadd252a85cc4b691f9e20f7 100644 (file)
@@ -109,7 +109,13 @@ Header notes:
                        1 - 4K
                        2 - 16K
                        3 - 64K
-  Bits 3-63:   Reserved.
+  Bit 3:       Kernel physical placement
+                       0 - 2MB aligned base should be as close as possible
+                           to the base of DRAM, since memory below it is not
+                           accessible via the linear mapping
+                       1 - 2MB aligned base may be anywhere in physical
+                           memory
+  Bits 4-63:   Reserved.
 
 - When image_size is zero, a bootloader should attempt to keep as much
   memory as possible free for use by the kernel immediately after the
@@ -117,14 +123,14 @@ Header notes:
   depending on selected features, and is effectively unbound.
 
 The Image must be placed text_offset bytes from a 2MB aligned base
-address near the start of usable system RAM and called there. Memory
-below that base address is currently unusable by Linux, and therefore it
-is strongly recommended that this location is the start of system RAM.
-The region between the 2 MB aligned base address and the start of the
-image has no special significance to the kernel, and may be used for
-other purposes.
+address anywhere in usable system RAM and called there. The region
+between the 2 MB aligned base address and the start of the image has no
+special significance to the kernel, and may be used for other purposes.
 At least image_size bytes from the start of the image must be free for
 use by the kernel.
+NOTE: kernel versions prior to v4.6 cannot make use of memory below the
+physical offset of the Image, so it is recommended that the Image be
+placed as close as possible to the start of system RAM.
 
 Any memory described to the kernel (even that below the start of the
 image) which is not marked as reserved from the kernel (e.g., with a
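
For reference, bit 3 lives in the 64-bit little-endian "flags" field of the
Image header, at byte offset 24 (after code0, code1, text_offset and
image_size).  A quick way to inspect it from a shell, assuming GNU coreutils
(a sketch, not part of the boot protocol itself):

  # Dump the flags field of an arm64 Image; bit 3 set means the 2MB
  # aligned base may be placed anywhere in physical memory.
  od -A n -t x8 -j 24 -N 8 Image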
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
new file mode 100644 (file)
index 0000000..58b71dd
--- /dev/null
@@ -0,0 +1,58 @@
+                Silicon Errata and Software Workarounds
+                =======================================
+
+Author: Will Deacon <will.deacon@arm.com>
+Date  : 27 November 2015
+
+It is an unfortunate fact of life that hardware is often produced with
+so-called "errata", which can cause it to deviate from the architecture
+under specific circumstances.  For hardware produced by ARM, these
+errata are broadly classified into the following categories:
+
+  Category A: A critical error without a viable workaround.
+  Category B: A significant or critical error with an acceptable
+              workaround.
+  Category C: A minor error that is not expected to occur under normal
+              operation.
+
+For more information, consult one of the "Software Developers Errata
+Notice" documents available on infocenter.arm.com (registration
+required).
+
+As far as Linux is concerned, Category B errata may require some special
+treatment in the operating system. For example, avoiding a particular
+sequence of code, or configuring the processor in a particular way. A
+less common situation may require similar actions in order to declassify
+a Category A erratum into a Category C erratum. These are collectively
+known as "software workarounds" and are only required in the minority of
+cases (e.g. those cases that both require a non-secure workaround *and*
+can be triggered by Linux).
+
+For software workarounds that may adversely impact systems unaffected by
+the erratum in question, a Kconfig entry is added under "Kernel
+Features" -> "ARM errata workarounds via the alternatives framework".
+These are enabled by default and patched in at runtime when an affected
+CPU is detected. For less-intrusive workarounds, a Kconfig option is not
+available and the code is structured (preferably with a comment) in such
+a way that the erratum will not be hit.
+
+This approach can make it slightly onerous to determine exactly which
+errata are worked around in an arbitrary kernel source tree, so this
+file acts as a registry of software workarounds in the Linux Kernel and
+will be updated when new workarounds are committed and backported to
+stable kernels.
+
+| Implementor    | Component       | Erratum ID      | Kconfig                 |
++----------------+-----------------+-----------------+-------------------------+
+| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319    |
+| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319    |
+| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069    |
+| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472    |
+| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719    |
+| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419    |
+| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
+| ARM            | Cortex-A57      | #852523         | N/A                     |
+| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
+|                |                 |                 |                         |
+| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
+| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
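
Since the Kconfig symbols in the table are the record of which workarounds a
given kernel can apply, one way to audit a running kernel is to check its
configuration.  A sketch, assuming CONFIG_IKCONFIG_PROC is enabled (otherwise
inspect the build's .config directly):

  zcat /proc/config.gz | grep -E 'ARM64_ERRATUM|CAVIUM_ERRATUM'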
index 0cb44dc21f97ca7cfde5056ce7d44941d3439546..601256fe8c0dd99d2df3ff5a77b2cee23a852e5d 100644 (file)
@@ -45,21 +45,10 @@ Devices supporting OPPs must set their "operating-points-v2" property with
 phandle to an OPP table in their DT node. The OPP core will use this phandle to
 find the operating points for the device.
 
-Devices may want to choose OPP tables at runtime and so can provide a list of
-phandles here. But only *one* of them should be chosen at runtime. This must be
-accompanied by a corresponding "operating-points-names" property, to uniquely
-identify the OPP tables.
-
 If required, this can be extended for SoC vendor specific bindings. Such bindings
 should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
 and should have a compatible description like: "operating-points-v2-<vendor>".
 
-Optional properties:
-- operating-points-names: Names of OPP tables (required if multiple OPP
-  tables are present), to uniquely identify them. The same list must be present
-  for all the CPUs which are sharing clock/voltage rails and hence the OPP
-  tables.
-
 * OPP Table Node
 
 This describes the OPPs belonging to a device. This node can have following
@@ -100,6 +89,14 @@ Optional properties:
   Entries for multiple regulators must be present in the same order as
   regulators are specified in device's DT node.
 
+- opp-microvolt-<name>: Named opp-microvolt property. This works exactly like
+  the opp-microvolt property above, but allows multiple voltage ranges to be
+  provided for the same OPP. At runtime, the platform can pick a <name> and the
+  matching opp-microvolt-<name> property will be enabled for all OPPs. If the
+  platform doesn't pick a specific <name>, or the <name> doesn't match any
+  opp-microvolt-<name> property, then the opp-microvolt property shall be used,
+  if present.
+
 - opp-microamp: The maximum current drawn by the device in microamperes
   considering system specific parameters (such as transients, process, aging,
   maximum operating temperature range etc.) as necessary. This may be used to
@@ -112,6 +109,9 @@ Optional properties:
   for few regulators, then this should be marked as zero for them. If it isn't
   required for any regulator, then this property need not be present.
 
+- opp-microamp-<name>: Named opp-microamp property. Similar to
+  the opp-microvolt-<name> property above, but for current instead.
+
 - clock-latency-ns: Specifies the maximum possible transition latency (in
   nanoseconds) for switching to this OPP from any other OPP.
 
@@ -123,6 +123,26 @@ Optional properties:
 - opp-suspend: Marks the OPP to be used during device suspend. Only one OPP in
   the table should have this.
 
+- opp-supported-hw: This enables us to select only a subset of OPPs from the
+  larger OPP table, based on what version of the hardware we are running on. We
+  still can't have multiple nodes with the same opp-hz value in an OPP table.
+
+  It's a user-defined array containing a hierarchy of hardware version numbers
+  supported by the OPP. For example, on a platform with a hierarchy of three
+  levels of versions (A, B and C), this field should be like <X Y Z>, where X
+  corresponds to version hierarchy A, Y corresponds to version hierarchy B and Z
+  corresponds to version hierarchy C.
+
+  Each level of hierarchy is represented by a 32 bit value, so there can be
+  only 32 different supported versions per hierarchy, i.e. 1 bit per version. A
+  value of 0xFFFFFFFF will enable the OPP for all versions of that hierarchy
+  level, while a value of 0x00000000 will disable the OPP completely and so
+  should never be used.
+
+  If 32 versions aren't sufficient for a version hierarchy, then that version
+  hierarchy can be spread over multiple 32 bit values, i.e. with <X Y Z1 Z2> in
+  the above example, Z1 & Z2 refer to the version hierarchy Z.
+
 - status: Marks the node enabled/disabled.
 
 Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
@@ -157,20 +177,20 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
                compatible = "operating-points-v2";
                opp-shared;
 
-               opp00 {
+               opp@1000000000 {
                        opp-hz = /bits/ 64 <1000000000>;
                        opp-microvolt = <970000 975000 985000>;
                        opp-microamp = <70000>;
                        clock-latency-ns = <300000>;
                        opp-suspend;
                };
-               opp01 {
+               opp@1100000000 {
                        opp-hz = /bits/ 64 <1100000000>;
                        opp-microvolt = <980000 1000000 1010000>;
                        opp-microamp = <80000>;
                        clock-latency-ns = <310000>;
                };
-               opp02 {
+               opp@1200000000 {
                        opp-hz = /bits/ 64 <1200000000>;
                        opp-microvolt = <1025000>;
                        clock-latency-ns = <290000>;
@@ -236,20 +256,20 @@ independently.
                 * independently.
                 */
 
-               opp00 {
+               opp@1000000000 {
                        opp-hz = /bits/ 64 <1000000000>;
                        opp-microvolt = <970000 975000 985000>;
                        opp-microamp = <70000>;
                        clock-latency-ns = <300000>;
                        opp-suspend;
                };
-               opp01 {
+               opp@1100000000 {
                        opp-hz = /bits/ 64 <1100000000>;
                        opp-microvolt = <980000 1000000 1010000>;
                        opp-microamp = <80000>;
                        clock-latency-ns = <310000>;
                };
-               opp02 {
+               opp@1200000000 {
                        opp-hz = /bits/ 64 <1200000000>;
                        opp-microvolt = <1025000>;
                        opp-microamp = <90000>;
@@ -312,20 +332,20 @@ DVFS state together.
                compatible = "operating-points-v2";
                opp-shared;
 
-               opp00 {
+               opp@1000000000 {
                        opp-hz = /bits/ 64 <1000000000>;
                        opp-microvolt = <970000 975000 985000>;
                        opp-microamp = <70000>;
                        clock-latency-ns = <300000>;
                        opp-suspend;
                };
-               opp01 {
+               opp@1100000000 {
                        opp-hz = /bits/ 64 <1100000000>;
                        opp-microvolt = <980000 1000000 1010000>;
                        opp-microamp = <80000>;
                        clock-latency-ns = <310000>;
                };
-               opp02 {
+               opp@1200000000 {
                        opp-hz = /bits/ 64 <1200000000>;
                        opp-microvolt = <1025000>;
                        opp-microamp = <90000>;
@@ -338,20 +358,20 @@ DVFS state together.
                compatible = "operating-points-v2";
                opp-shared;
 
-               opp10 {
+               opp@1300000000 {
                        opp-hz = /bits/ 64 <1300000000>;
                        opp-microvolt = <1045000 1050000 1055000>;
                        opp-microamp = <95000>;
                        clock-latency-ns = <400000>;
                        opp-suspend;
                };
-               opp11 {
+               opp@1400000000 {
                        opp-hz = /bits/ 64 <1400000000>;
                        opp-microvolt = <1075000>;
                        opp-microamp = <100000>;
                        clock-latency-ns = <400000>;
                };
-               opp12 {
+               opp@1500000000 {
                        opp-hz = /bits/ 64 <1500000000>;
                        opp-microvolt = <1010000 1100000 1110000>;
                        opp-microamp = <95000>;
@@ -378,7 +398,7 @@ Example 4: Handling multiple regulators
                compatible = "operating-points-v2";
                opp-shared;
 
-               opp00 {
+               opp@1000000000 {
                        opp-hz = /bits/ 64 <1000000000>;
                        opp-microvolt = <970000>, /* Supply 0 */
                                        <960000>, /* Supply 1 */
@@ -391,7 +411,7 @@ Example 4: Handling multiple regulators
 
                /* OR */
 
-               opp00 {
+               opp@1000000000 {
                        opp-hz = /bits/ 64 <1000000000>;
                        opp-microvolt = <970000 975000 985000>, /* Supply 0 */
                                        <960000 965000 975000>, /* Supply 1 */
@@ -404,7 +424,7 @@ Example 4: Handling multiple regulators
 
                /* OR */
 
-               opp00 {
+               opp@1000000000 {
                        opp-hz = /bits/ 64 <1000000000>;
                        opp-microvolt = <970000 975000 985000>, /* Supply 0 */
                                        <960000 965000 975000>, /* Supply 1 */
@@ -417,7 +437,8 @@ Example 4: Handling multiple regulators
        };
 };
 
-Example 5: Multiple OPP tables
+Example 5: opp-supported-hw
+(example: three-level hierarchy of versions: cuts, substrate and process)
 
 / {
        cpus {
@@ -426,40 +447,73 @@ Example 5: Multiple OPP tables
                        ...
 
                       cpu-supply = <&cpu_supply>;
-                       operating-points-v2 = <&cpu0_opp_table_slow>, <&cpu0_opp_table_fast>;
-                       operating-points-names = "slow", "fast";
+                       operating-points-v2 = <&cpu0_opp_table_slow>;
                };
        };
 
-       cpu0_opp_table_slow: opp_table_slow {
+       opp_table {
                compatible = "operating-points-v2";
                status = "okay";
                opp-shared;
 
-               opp00 {
+               opp@600000000 {
+                       /*
+                        * Supports all substrate and process versions for 0xF
+                        * cuts, i.e. only the first four cuts.
+                        */
+                       opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF>;
                        opp-hz = /bits/ 64 <600000000>;
+                       opp-microvolt = <900000 915000 925000>;
                        ...
                };
 
-               opp01 {
+               opp@800000000 {
+                       /*
+                        * Supports:
+                        * - cuts: only one, 6th cut (represented by 6th bit).
+                        * - substrate: supports 16 different substrate versions
+                        * - process: supports 9 different process versions
+                        */
+                       opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0>;
                        opp-hz = /bits/ 64 <800000000>;
+                       opp-microvolt = <900000 915000 925000>;
                        ...
                };
        };
+};
+
+Example 6: opp-microvolt-<name>, opp-microamp-<name>:
+(example: device with two possible microvolt ranges: slow and fast)
 
-       cpu0_opp_table_fast: opp_table_fast {
+/ {
+       cpus {
+               cpu@0 {
+                       compatible = "arm,cortex-a7";
+                       ...
+
+                       operating-points-v2 = <&cpu0_opp_table>;
+               };
+       };
+
+       cpu0_opp_table: opp_table0 {
                compatible = "operating-points-v2";
-               status = "okay";
                opp-shared;
 
-               opp10 {
+               opp@1000000000 {
                        opp-hz = /bits/ 64 <1000000000>;
-                       ...
+                       opp-microvolt-slow = <900000 915000 925000>;
+                       opp-microvolt-fast = <970000 975000 985000>;
+                       opp-microamp-slow =  <70000>;
+                       opp-microamp-fast =  <71000>;
                };
 
-               opp11 {
-                       opp-hz = /bits/ 64 <1100000000>;
-                       ...
+               opp@1200000000 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt-slow = <900000 915000 925000>, /* Supply vcc0 */
+                                             <910000 925000 935000>; /* Supply vcc1 */
+                       opp-microvolt-fast = <970000 975000 985000>, /* Supply vcc0 */
+                                            <960000 965000 975000>; /* Supply vcc1 */
+                       opp-microamp =  <70000>; /* Will be used for both slow/fast */
                };
        };
 };
index e63316239938164be150d0a9d73dac94fa9b4b92..4199ffecc0ff06bde4b26fa5f09d1fd0a9a08601 100644 (file)
@@ -9,7 +9,7 @@
     |       alpha: |  ..  |
     |         arc: | TODO |
     |         arm: |  ok  |
-    |       arm64: |  ..  |
+    |       arm64: |  ok  |
     |       avr32: | TODO |
     |    blackfin: | TODO |
     |         c6x: | TODO |
index af6816bccb439d76a7f80d43f97f4a95e7d604c2..df1d1f3c9af290aa6ffaa1584f71ba6025e54c4e 100644 (file)
@@ -9,7 +9,7 @@
     |       alpha: | TODO |
     |         arc: | TODO |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |       avr32: | TODO |
     |    blackfin: | TODO |
     |         c6x: | TODO |
index 0e4102ae1a61708c8df8748c529ba725e2f49a32..554f3844d4993549943caaccbf706bab7e063178 100644 (file)
@@ -3409,6 +3409,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        ro              [KNL] Mount root device read-only on boot
 
+       rodata=         [KNL]
+               on      Mark read-only kernel memory as read-only (default).
+               off     Leave read-only kernel memory writable for debugging.
+
        root=           [KNL] Root filesystem
                        See name_to_dev_t comment in init/do_mounts.c.
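
rodata= is an ordinary kernel command-line parameter, so it can be exercised
from the bootloader; a sketch assuming a U-Boot style environment (the
variable name and syntax vary by bootloader):

  # Leave kernel read-only memory writable, for debugging only.
  setenv bootargs "${bootargs} rodata=off"
  # After boot, confirm the parameter was seen by the kernel:
  cat /proc/cmdline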
 
index 0a5c3290e7324f09ab01dac65f6825d22e35ea09..a33c88cd5d1d62f3fb4e377090d3d75615242b55 100644 (file)
@@ -190,8 +190,8 @@ expected to be accessed and controlled using those entries.
 Last but not least, "struct module *owner" is expected to be set to reflect
 the information carried in "THIS_MODULE".
 
-How to use
-----------
+How to use the tracer modules
+-----------------------------
 
 Before trace collection can start, a coresight sink needs to be identified.
 There is no limit on the number of sinks (or sources) that can be enabled at
@@ -297,3 +297,36 @@ Info                                    Tracing enabled
 Instruction     13570831        0x8026B584      E28DD00C        false   ADD      sp,sp,#0xc
 Instruction     0       0x8026B588      E8BD8000        true    LDM      sp!,{pc}
 Timestamp                                       Timestamp: 17107041535
+
+How to use the STM module
+-------------------------
+
+Using the System Trace Macrocell module is the same as using the tracers - the
+only difference is that trace capture is driven by clients rather than by the
+program flow through the code.
+
+As with any other CoreSight component, specifics about the STM tracer can be
+found in sysfs, with more information on each entry available in [1]:
+
+root@genericarmv8:~# ls /sys/bus/coresight/devices/20100000.stm
+enable_source   hwevent_select  port_enable     subsystem       uevent
+hwevent_enable  mgmt            port_select     traceid
+root@genericarmv8:~#
+
+Like any other source a sink needs to be identified and the STM enabled before
+being used:
+
+root@genericarmv8:~# echo 1 > /sys/bus/coresight/devices/20010000.etf/enable_sink
+root@genericarmv8:~# echo 1 > /sys/bus/coresight/devices/20100000.stm/enable_source
+
+From there user space applications can request and use channels using the devfs
+interface provided for that purpose by the generic STM API:
+
+root@genericarmv8:~# ls -l /dev/20100000.stm
+crw-------    1 root     root       10,  61 Jan  3 18:11 /dev/20100000.stm
+root@genericarmv8:~#
+
+Details on how to use the generic STM API can be found here [2].
+
+[1]. Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
+[2]. Documentation/trace/stm.txt
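
As a rough end-to-end sketch of that devfs flow (the configfs policy layout
follows Documentation/trace/stm.txt; the policy and node names are
illustrative, and the node's masters/channels attributes may need adjusting):

root@genericarmv8:~# mkdir -p /sys/kernel/config/stp-policy/20100000.stm.my-policy/default
root@genericarmv8:~# echo "hello world" > /dev/20100000.stm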
index ab65bbecb159279675cf5bb63e64f1feabf13f44..e97e72928a5a044d451ede1d3c22e5d4a9b726a6 100644 (file)
@@ -9356,6 +9356,7 @@ F:        drivers/mmc/host/dw_mmc*
 SYSTEM TRACE MODULE CLASS
 M:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git
 F:     Documentation/trace/stm.txt
 F:     drivers/hwtracing/stm/
 F:     include/linux/stm.h
index d5525bfc7e3e61879d278ae08b446e185c206982..9156fc303afd8d278671c6d4d277d866fdd2ad7b 100644 (file)
@@ -491,7 +491,6 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
 #endif
 
 #ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
 void set_kernel_text_rw(void);
 void set_kernel_text_ro(void);
 #else
index 194c91b610ffecfd4071da89d16b923c614bf68d..c35c349da06983b5eee05bc8bca52e526ed1bc52 100644 (file)
@@ -79,6 +79,8 @@
 #define rr_lo_hi(a1, a2) a1, a2
 #endif
 
+#define kvm_ksym_ref(kva)      (kva)
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
index e06fd299de0846b44b72cd037eacd05b0b2cb051..70e6d557c75f696e537fe2a48bb4cc55721a6f8c 100644 (file)
@@ -969,7 +969,7 @@ static void cpu_init_hyp_mode(void *dummy)
        pgd_ptr = kvm_mmu_get_httbr();
        stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
-       vector_ptr = (unsigned long)__kvm_hyp_vector;
+       vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 
        __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
 
@@ -1061,7 +1061,8 @@ static int init_hyp_mode(void)
        /*
         * Map the Hyp-code called directly from the host
         */
-       err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+       err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start),
+                                 kvm_ksym_ref(__kvm_hyp_code_end));
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_free_mappings;
index b2b97e3e7babbbb37072b4185faa27a0e47760c8..a62a7b64f49c52706b8e133b1f786dc9dc34842d 100644 (file)
@@ -23,9 +23,8 @@
 #include <linux/const.h>
 #include <asm/page.h>
 
-       __PAGE_ALIGNED_DATA
-
        .globl vdso_start, vdso_end
+       .section .data..ro_after_init
        .balign PAGE_SIZE
 vdso_start:
        .incbin "arch/arm/vdso/vdso.so"
index 871f21783866d5fdb1557ec56e5f13b602ba331a..97583a1878dbbb91b1ee8f39adb8e426392324d0 100644 (file)
@@ -13,6 +13,7 @@ config ARM64
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select ARCH_WANT_FRAME_POINTERS
+       select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARM_AMBA
        select ARM_ARCH_TIMER
        select ARM_GIC
@@ -48,6 +49,7 @@ config ARM64
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
+       select HAVE_ARCH_HUGE_VMAP
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
        select HAVE_ARCH_KGDB
@@ -70,6 +72,7 @@ config ARM64
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_HW_BREAKPOINT if PERF_EVENTS
+       select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_MEMBLOCK
        select HAVE_PATA_PLATFORM
        select HAVE_PERF_EVENTS
@@ -362,6 +365,7 @@ config ARM64_ERRATUM_843419
        bool "Cortex-A53: 843419: A load or store might access an incorrect address"
        depends on MODULES
        default y
+       select ARM64_MODULE_CMODEL_LARGE
        help
          This option builds kernel modules using the large memory model in
          order to avoid the use of the ADRP instruction, which can cause
@@ -506,6 +510,9 @@ config HOTPLUG_CPU
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+       def_bool y
+
 config ARCH_HAS_HOLES_MEMORYMODEL
        def_bool y if SPARSEMEM
 
@@ -529,9 +536,6 @@ config HW_PERF_EVENTS
 config SYS_SUPPORTS_HUGETLBFS
        def_bool y
 
-config ARCH_WANT_GENERAL_HUGETLB
-       def_bool y
-
 config ARCH_WANT_HUGE_PMD_SHARE
        def_bool y if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
 
@@ -704,10 +708,93 @@ config ARM64_LSE_ATOMICS
 
 endmenu
 
+config ARM64_UAO
+       bool "Enable support for User Access Override (UAO)"
+       default y
+       help
+         User Access Override (UAO; part of the ARMv8.2 Extensions)
+         causes the 'unprivileged' variant of the load/store instructions to
+         be overridden to be privileged.
+
+         This option changes get_user() and friends to use the 'unprivileged'
+         variant of the load/store instructions. This ensures that user-space
+         really did have access to the supplied memory. When addr_limit is
+         set to kernel memory the UAO bit will be set, allowing privileged
+         access to kernel memory.
+
+         Choosing this option will cause copy_to_user() et al to use user-space
+         memory permissions.
+
+         The feature is detected at runtime; the kernel will use the
+         regular load/store instructions if the CPU does not implement the
+         feature.
+
+config ARM64_MODULE_CMODEL_LARGE
+       bool
+
+config ARM64_MODULE_PLTS
+       bool
+       select ARM64_MODULE_CMODEL_LARGE
+       select HAVE_MOD_ARCH_SPECIFIC
+
+config RELOCATABLE
+       bool
+       help
+         This builds the kernel as a Position Independent Executable (PIE),
+         which retains all relocation metadata required to relocate the
+         kernel binary at runtime to a different virtual address than the
+         address it was linked at.
+         Since AArch64 uses the RELA relocation format, this requires a
+         relocation pass at runtime even if the kernel is loaded at the
+         same address it was linked at.
+
+config RANDOMIZE_BASE
+       bool "Randomize the address of the kernel image"
+       select ARM64_MODULE_PLTS
+       select RELOCATABLE
+       help
+         Randomizes the virtual address at which the kernel image is
+         loaded, as a security feature that deters exploit attempts
+         relying on knowledge of the location of kernel internals.
+
+         It is the bootloader's job to provide entropy, by passing a
+         random u64 value in /chosen/kaslr-seed at kernel entry.
+
+         When booting via the UEFI stub, it will invoke the firmware's
+         EFI_RNG_PROTOCOL implementation (if available) to supply entropy
+         to the kernel proper. In addition, it will randomise the physical
+         location of the kernel Image as well.
+
+         If unsure, say N.
+
+config RANDOMIZE_MODULE_REGION_FULL
+       bool "Randomize the module region independently from the core kernel"
+       depends on RANDOMIZE_BASE
+       default y
+       help
+         Randomizes the location of the module region without considering the
+         location of the core kernel. This way, it is impossible for modules
+         to leak information about the location of core kernel data structures,
+         but it does imply that function calls between modules and the core
+         kernel will need to be resolved via veneers in the module PLT.
+
+         When this option is not set, the module region will be randomized over
+         a limited range that contains the [_stext, _etext] interval of the
+         core kernel, so branch relocations are always in range.
+
 endmenu
 
 menu "Boot options"
 
+config ARM64_ACPI_PARKING_PROTOCOL
+       bool "Enable support for the ARM64 ACPI parking protocol"
+       depends on ACPI
+       help
+         Enable support for the ARM64 ACPI parking protocol. If disabled
+         the kernel will not allow booting through the ARM64 ACPI parking
+         protocol even if the corresponding data is present in the ACPI
+         MADT table.
+
 config CMDLINE
        string "Default kernel command string"
        default ""
index b6c90e5006e45ae01654c3b5867c3a49910dc54e..304dcc3da06f5f5d238f77a183c940607296f31a 100644 (file)
@@ -15,6 +15,10 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 OBJCOPYFLAGS   :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 GZFLAGS                :=-9
 
+ifneq ($(CONFIG_RELOCATABLE),)
+LDFLAGS_vmlinux                += -pie
+endif
+
 KBUILD_DEFCONFIG := defconfig
 
 # Check for binutils support for specific extensions
@@ -28,6 +32,7 @@ endif
 
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr)
 KBUILD_CFLAGS  += $(call cc-option, -mpc-relative-literal-loads)
+KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
 KBUILD_AFLAGS  += $(lseinstr)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
@@ -42,10 +47,14 @@ endif
 
 CHECKFLAGS     += -D__aarch64__
 
-ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
 KBUILD_CFLAGS_MODULE   += -mcmodel=large
 endif
 
+ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
+KBUILD_LDFLAGS_MODULE  += -T $(srctree)/arch/arm64/kernel/module.lds
+endif
+
 # Default value
 head-y         := arch/arm64/kernel/head.o
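
When CONFIG_RELOCATABLE is enabled, the resulting vmlinux is linked with -pie
and carries RELA relocations that are processed during early boot.  A quick
sanity check on the build output (the toolchain prefix is illustrative):

  aarch64-linux-gnu-readelf -S vmlinux | grep rela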
 
index caafd63b8092d8102401112d811b1055f4cc3524..aee323b13802ad143e9774d2076a4b1c63731ece 100644 (file)
@@ -87,9 +87,26 @@ void __init acpi_init_cpus(void);
 static inline void acpi_init_cpus(void) { }
 #endif /* CONFIG_ACPI */
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+bool acpi_parking_protocol_valid(int cpu);
+void __init
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
+#else
+static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
+static inline void
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
+{}
+#endif
+
 static inline const char *acpi_get_enable_method(int cpu)
 {
-       return acpi_psci_present() ? "psci" : NULL;
+       if (acpi_psci_present())
+               return "psci";
+
+       if (acpi_parking_protocol_valid(cpu))
+               return "parking-protocol";
+
+       return NULL;
 }
 
 #ifdef CONFIG_ACPI_APEI
index d56ec07151570e6e498e9bf35b2fd9ccaed335fe..beccbdefa106a4ba0dd7d98537427a37d2c26517 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __ASM_ALTERNATIVE_H
 #define __ASM_ALTERNATIVE_H
 
+#include <asm/cpufeature.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
@@ -19,7 +21,6 @@ struct alt_instr {
 
 void __init apply_alternatives_all(void);
 void apply_alternatives(void *start, size_t length);
-void free_alternatives_memory(void);
 
 #define ALTINSTR_ENTRY(feature)                                                      \
        " .word 661b - .\n"                             /* label           */ \
@@ -64,6 +65,8 @@ void free_alternatives_memory(void);
 
 #else
 
+#include <asm/assembler.h>
+
 .macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
        .word \orig_offset - .
        .word \alt_offset - .
@@ -137,6 +140,65 @@ void free_alternatives_memory(void);
        alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
 
 
+/*
+ * Generate the assembly for UAO alternatives with exception table entries.
+ * This is complicated as there are no post-increment or pair versions of the
+ * unprivileged instructions, and USER() only works for single instructions.
+ */
+#ifdef CONFIG_ARM64_UAO
+       .macro uao_ldp l, reg1, reg2, addr, post_inc
+               alternative_if_not ARM64_HAS_UAO
+8888:                  ldp     \reg1, \reg2, [\addr], \post_inc;
+8889:                  nop;
+                       nop;
+               alternative_else
+                       ldtr    \reg1, [\addr];
+                       ldtr    \reg2, [\addr, #8];
+                       add     \addr, \addr, \post_inc;
+               alternative_endif
+
+               _asm_extable    8888b,\l;
+               _asm_extable    8889b,\l;
+       .endm
+
+       .macro uao_stp l, reg1, reg2, addr, post_inc
+               alternative_if_not ARM64_HAS_UAO
+8888:                  stp     \reg1, \reg2, [\addr], \post_inc;
+8889:                  nop;
+                       nop;
+               alternative_else
+                       sttr    \reg1, [\addr];
+                       sttr    \reg2, [\addr, #8];
+                       add     \addr, \addr, \post_inc;
+               alternative_endif
+
+               _asm_extable    8888b,\l;
+               _asm_extable    8889b,\l;
+       .endm
+
+       .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
+               alternative_if_not ARM64_HAS_UAO
+8888:                  \inst   \reg, [\addr], \post_inc;
+                       nop;
+               alternative_else
+                       \alt_inst       \reg, [\addr];
+                       add             \addr, \addr, \post_inc;
+               alternative_endif
+
+               _asm_extable    8888b,\l;
+       .endm
+#else
+       .macro uao_ldp l, reg1, reg2, addr, post_inc
+               USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
+       .endm
+       .macro uao_stp l, reg1, reg2, addr, post_inc
+               USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
+       .endm
+       .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
+               USER(\l, \inst \reg, [\addr], \post_inc)
+       .endm
+#endif
+
 #endif  /*  __ASSEMBLY__  */
 
 /*
index 12eff928ef8b38dd18ae3bd157b12eb918f797a6..70f7b9e04598bdf953b56d40794f1ab5aaf966ca 100644 (file)
        dmb     \opt
        .endm
 
+/*
+ * Emit an entry into the exception table
+ */
+       .macro          _asm_extable, from, to
+       .pushsection    __ex_table, "a"
+       .align          3
+       .long           (\from - .), (\to - .)
+       .popsection
+       .endm
+
 #define USER(l, x...)                          \
 9999:  x;                                      \
-       .section __ex_table,"a";                \
-       .align  3;                              \
-       .quad   9999b,l;                        \
-       .previous
+       _asm_extable    9999b, l
 
 /*
  * Register aliases.
@@ -193,6 +200,17 @@ lr .req    x30             // link register
        str     \src, [\tmp, :lo12:\sym]
        .endm
 
+       /*
+        * @sym: The name of the per-cpu variable
+        * @reg: Result of per_cpu(sym, smp_processor_id())
+        * @tmp: scratch register
+        */
+       .macro this_cpu_ptr, sym, reg, tmp
+       adr_l   \reg, \sym
+       mrs     \tmp, tpidr_el1
+       add     \reg, \reg, \tmp
+       .endm
+
 /*
  * Annotate a function as position independent, i.e., safe to be called before
  * the kernel virtual mapping is activated.
@@ -204,4 +222,15 @@ lr .req    x30             // link register
        .size   __pi_##x, . - x;        \
        ENDPROC(x)
 
+       /*
+        * Emit a 64-bit absolute little endian symbol reference in a way that
+        * ensures that it will be resolved at build time, even when building a
+        * PIE binary. This requires cooperation from the linker script, which
+        * must emit the lo32/hi32 halves individually.
+        */
+       .macro  le64sym, sym
+       .long   \sym\()_lo32
+       .long   \sym\()_hi32
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H */
index 197e06afbf71947eb505a893e53b73be73543be9..39c1d340fec59136b8ddd6e3ac3f39354289189f 100644 (file)
@@ -36,7 +36,7 @@ static inline void atomic_andnot(int i, atomic_t *v)
        "       stclr   %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 static inline void atomic_or(int i, atomic_t *v)
@@ -48,7 +48,7 @@ static inline void atomic_or(int i, atomic_t *v)
        "       stset   %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 static inline void atomic_xor(int i, atomic_t *v)
@@ -60,7 +60,7 @@ static inline void atomic_xor(int i, atomic_t *v)
        "       steor   %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 static inline void atomic_add(int i, atomic_t *v)
@@ -72,7 +72,7 @@ static inline void atomic_add(int i, atomic_t *v)
        "       stadd   %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                          \
@@ -90,7 +90,7 @@ static inline int atomic_add_return##name(int i, atomic_t *v)         \
        "       add     %w[i], %w[i], w30")                             \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
-       : "x30" , ##cl);                                                \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
 }
@@ -116,7 +116,7 @@ static inline void atomic_and(int i, atomic_t *v)
        "       stclr   %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 static inline void atomic_sub(int i, atomic_t *v)
@@ -133,7 +133,7 @@ static inline void atomic_sub(int i, atomic_t *v)
        "       stadd   %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)                          \
@@ -153,7 +153,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v)               \
        "       add     %w[i], %w[i], w30")                             \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
-       : "x30" , ##cl);                                                \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
 }
@@ -177,7 +177,7 @@ static inline void atomic64_andnot(long i, atomic64_t *v)
        "       stclr   %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 static inline void atomic64_or(long i, atomic64_t *v)
@@ -189,7 +189,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
        "       stset   %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 static inline void atomic64_xor(long i, atomic64_t *v)
@@ -201,7 +201,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
        "       steor   %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 static inline void atomic64_add(long i, atomic64_t *v)
@@ -213,7 +213,7 @@ static inline void atomic64_add(long i, atomic64_t *v)
        "       stadd   %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)                                \
@@ -231,7 +231,7 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
        "       add     %[i], %[i], x30")                               \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
-       : "x30" , ##cl);                                                \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
 }
@@ -257,7 +257,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
        "       stclr   %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 static inline void atomic64_sub(long i, atomic64_t *v)
@@ -274,7 +274,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
        "       stadd   %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
-       : "x30");
+       : __LL_SC_CLOBBERS);
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                                \
@@ -294,7 +294,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
        "       add     %[i], %[i], x30")                               \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
-       : "x30" , ##cl);                                                \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
 }
@@ -330,7 +330,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
        "2:")
        : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
-       : "x30", "cc", "memory");
+       : __LL_SC_CLOBBERS, "cc", "memory");
 
        return x0;
 }
@@ -359,7 +359,7 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,       \
        "       mov     %" #w "[ret], " #w "30")                        \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)             \
        : [old] "r" (x1), [new] "r" (x2)                                \
-       : "x30" , ##cl);                                                \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
 }
@@ -416,7 +416,7 @@ static inline long __cmpxchg_double##name(unsigned long old1,               \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
-       : "x30" , ##cl);                                                \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
 }
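The widened clobber list exists because the out-of-line LL/SC fallback is reached with a bl, and with module PLTs that branch may be redirected through a veneer that scribbles on x16/x17 (IP0/IP1). A sketch of such a veneer, modelled on the new module-plts.c (instruction comments are illustrative):

	/*
	 * Hedged sketch: one module PLT entry. It materialises the branch
	 * target in x16 and jumps through it, which is why x16/x17 join x30
	 * in __LL_SC_CLOBBERS.
	 */
	struct plt_entry {
		__le32	mov0;	/* movn	x16, #0x....		*/
		__le32	mov1;	/* movk	x16, #0x...., lsl #16	*/
		__le32	mov2;	/* movk	x16, #0x...., lsl #32	*/
		__le32	br;	/* br	x16			*/
	};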
index 81151b67b26bf61fd756540f5643d8b5ab6882c7..ebf2481889c34848be0b34158647a4f60b770090 100644 (file)
 #define MIN_FDT_ALIGN          8
 #define MAX_FDT_SIZE           SZ_2M
 
+/*
+ * arm64 requires the kernel image to be placed
+ * TEXT_OFFSET bytes beyond a 2 MB aligned base
+ */
+#define MIN_KIMG_ALIGN         SZ_2M
+
 #endif
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
new file mode 100644 (file)
index 0000000..ed693c5
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BRK_IMM_H
+#define __ASM_BRK_IMM_H
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
+ */
+#define FAULT_BRK_IMM                  0x100
+#define KGDB_DYN_DBG_BRK_IMM           0x400
+#define KGDB_COMPILED_DBG_BRK_IMM      0x401
+#define BUG_BRK_IMM                    0x800
+
+#endif
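For reference, A64 encodes BRK as 0xd4200000 with the #imm16 field at bits [20:5], so the immediates above translate into trap opcodes as follows (GEN_BRK is an illustrative name, not from this patch):

	/* Hedged sketch: turning a brk-imm.h constant into a BRK opcode. */
	#define AARCH64_BREAK_MON	0xd4200000	/* BRK #0 */
	#define GEN_BRK(imm16)	(AARCH64_BREAK_MON | (((imm16) & 0xffff) << 5))

	/* e.g. the kernel BUG() trap: */
	#define BUG_BRK_INSN	GEN_BRK(BUG_BRK_IMM)	/* = 0xd4210000 */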
index 4a748ce9ba1a71241c8f13834e488cef9753dc38..561190d1588136b1f7bc207ebfd04251694ea552 100644 (file)
@@ -18,7 +18,7 @@
 #ifndef _ARCH_ARM64_ASM_BUG_H
 #define _ARCH_ARM64_ASM_BUG_H
 
-#include <asm/debug-monitors.h>
+#include <asm/brk-imm.h>
 
 #ifdef CONFIG_GENERIC_BUG
 #define HAVE_ARCH_BUG
index 54efedaf331fda55478d001d860d6137be5d08e8..22dda613f9c91bd3bcfe3a8aad435f7384795401 100644 (file)
@@ -68,6 +68,7 @@
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
+extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 
 static inline void flush_cache_mm(struct mm_struct *mm)
@@ -155,8 +156,4 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
-#endif
-
 #endif
index 9ea611ea69df739009d0a6d432bbbedcab05284b..510c7b4044547f82750ca9295e6d35bdeb0b67bf 100644 (file)
@@ -19,7 +19,6 @@
 #define __ASM_CMPXCHG_H
 
 #include <linux/bug.h>
-#include <linux/mmdebug.h>
 
 #include <asm/atomic.h>
 #include <asm/barrier.h>
index b5e9cee4b5f81a3498a67b934dc9157e2d4cf45b..13a6103130cd7036b889250889264847b6871249 100644 (file)
@@ -36,6 +36,7 @@ struct cpuinfo_arm64 {
        u64             reg_id_aa64isar1;
        u64             reg_id_aa64mmfr0;
        u64             reg_id_aa64mmfr1;
+       u64             reg_id_aa64mmfr2;
        u64             reg_id_aa64pfr0;
        u64             reg_id_aa64pfr1;
 
index 8f271b83f9106c7c9753ce2601d3b59e1ffbdfc5..37a53fc6b384eadb7d5b755066ec9fe67717167c 100644 (file)
 #define ARM64_HAS_LSE_ATOMICS                  5
 #define ARM64_WORKAROUND_CAVIUM_23154          6
 #define ARM64_WORKAROUND_834220                        7
+#define ARM64_HAS_NO_HW_PREFETCH               8
+#define ARM64_HAS_UAO                          9
+#define ARM64_ALT_PAN_NOT_UAO                  10
 
-#define ARM64_NCAPS                            8
+#define ARM64_NCAPS                            11
 
 #ifndef __ASSEMBLY__
 
@@ -176,7 +179,7 @@ u64 read_system_reg(u32 id);
 
 static inline bool cpu_supports_mixed_endian_el0(void)
 {
-       return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+       return id_aa64mmfr0_mixed_endian_el0(read_cpuid(SYS_ID_AA64MMFR0_EL1));
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
index 1a5949364ed0f43eee2be4b61c3497fe4fdbbb7b..b3a83da152a7c75d3ae3964bebb143efadcff6e2 100644 (file)
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
 
-#define read_cpuid(reg) ({                                             \
-       u64 __val;                                                      \
-       asm("mrs        %0, " #reg : "=r" (__val));                     \
-       __val;                                                          \
-})
-
 #define MIDR_REVISION_MASK     0xf
 #define MIDR_REVISION(midr)    ((midr) & MIDR_REVISION_MASK)
 #define MIDR_PARTNUM_SHIFT     4
 #define MIDR_IMPLEMENTOR(midr) \
        (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
-#define MIDR_CPU_PART(imp, partnum) \
+#define MIDR_CPU_MODEL(imp, partnum) \
        (((imp)                 << MIDR_IMPLEMENTOR_SHIFT) | \
        (0xf                    << MIDR_ARCHITECTURE_SHIFT) | \
        ((partnum)              << MIDR_PARTNUM_SHIFT))
 
+#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+                            MIDR_ARCHITECTURE_MASK)
+
+#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max)           \
+({                                                                     \
+       u32 _model = (midr) & MIDR_CPU_MODEL_MASK;                      \
+       u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);     \
+                                                                       \
+       _model == (model) && rv >= (rv_min) && rv <= (rv_max);          \
+ })
+
 #define ARM_CPU_IMP_ARM                        0x41
 #define ARM_CPU_IMP_APM                        0x50
 #define ARM_CPU_IMP_CAVIUM             0x43
 
 #define CAVIUM_CPU_PART_THUNDERX       0x0A1
 
+#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_THUNDERX  MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+
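A typical consumer of the new helper is an erratum check. A usage sketch in the style of cpu_errata.c (the revision window is illustrative; the variant lives at MIDR[23:20], the revision at MIDR[3:0]):

	/* Hedged sketch: is this a Cortex-A57 between r0p0 and r1p2? */
	static bool midr_is_a57_r0p0_to_r1p2(u32 midr)
	{
		return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_CORTEX_A57,
					       0x00,
					       (1 << MIDR_VARIANT_SHIFT) | 2);
	}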
 #ifndef __ASSEMBLY__
 
+#include <asm/sysreg.h>
+
+#define read_cpuid(reg) ({                                             \
+       u64 __val;                                                      \
+       asm("mrs_s      %0, " __stringify(reg) : "=r" (__val));         \
+       __val;                                                          \
+})
+
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant.  Use this function to read the CPU ID
  */
 static inline u32 __attribute_const__ read_cpuid_id(void)
 {
-       return read_cpuid(MIDR_EL1);
+       return read_cpuid(SYS_MIDR_EL1);
 }
 
 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 {
-       return read_cpuid(MPIDR_EL1);
+       return read_cpuid(SYS_MPIDR_EL1);
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -104,7 +121,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
 
 static inline u32 __attribute_const__ read_cpuid_cachetype(void)
 {
-       return read_cpuid(CTR_EL0);
+       return read_cpuid(SYS_CTR_EL0);
 }
 #endif /* __ASSEMBLY__ */
 
index 279c85b5ec091eafaa37fbb10d4ff59e2092a605..2fcb9b7c876c06d67c220f1eefcd0b61ef715a1c 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <asm/brk-imm.h>
 #include <asm/esr.h>
 #include <asm/insn.h>
 #include <asm/ptrace.h>
  */
 #define BREAK_INSTR_SIZE               AARCH64_INSN_SIZE
 
-/*
- * #imm16 values used for BRK instruction generation
- * Allowed values for kgbd are 0x400 - 0x7ff
- * 0x100: for triggering a fault on purpose (reserved)
- * 0x400: for dynamic BRK instruction
- * 0x401: for compile time BRK instruction
- * 0x800: kernel-mode BUG() and WARN() traps
- */
-#define FAULT_BRK_IMM                  0x100
-#define KGDB_DYN_DBG_BRK_IMM           0x400
-#define KGDB_COMPILED_DBG_BRK_IMM      0x401
-#define BUG_BRK_IMM                    0x800
-
 /*
  * BRK instruction encoding
 * The #imm16 value should be placed at bits[20:5] within BRK instruction
index faad6df49e5b00a49f2b82f06dc48b735662e55c..24ed037f09fd32385b9ccfb0e8d380b5c0d5c7ed 100644 (file)
 #include <asm/ptrace.h>
 #include <asm/user.h>
 
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
-#define ELF_CORE_COPY_REGS(dest, regs) \
-       *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
-
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef struct user_fpsimd_state elf_fpregset_t;
-
 /*
  * AArch64 static relocation types.
  */
@@ -86,6 +77,8 @@ typedef struct user_fpsimd_state elf_fpregset_t;
 #define R_AARCH64_MOVW_PREL_G2_NC      292
 #define R_AARCH64_MOVW_PREL_G3         293
 
+#define R_AARCH64_RELATIVE             1027
+
 /*
  * These are used to set parameters in the core dumps.
  */
@@ -127,6 +120,17 @@ typedef struct user_fpsimd_state elf_fpregset_t;
  */
 #define ELF_ET_DYN_BASE        (2 * TASK_SIZE_64 / 3)
 
+#ifndef __ASSEMBLY__
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs) \
+       *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+typedef struct user_fpsimd_state elf_fpregset_t;
+
 /*
  * When the program starts, a1 contains a pointer to a function to be
  * registered with atexit, as per the SVR4 ABI.  A value of 0 means we have no
@@ -186,4 +190,6 @@ extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
 
 #endif /* CONFIG_COMPAT */
 
+#endif /* !__ASSEMBLY__ */
+
 #endif
index 309704544d22763d6348095814fbdce935172c1b..1a617d46fce93247cf42fd0cda36a1355fc89aa9 100644 (file)
@@ -62,6 +62,16 @@ enum fixed_addresses {
 
        FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+
+       /*
+        * Used for kernel page table creation, so unmapped memory may be used
+        * for tables.
+        */
+       FIX_PTE,
+       FIX_PMD,
+       FIX_PUD,
+       FIX_PGD,
+
        __end_of_fixed_addresses
 };
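These slots let the page-table code write to a table page before that page has any permanent mapping. The usage pattern, sketched with the pte_set_fixmap helpers added later in this patch (the wrapper function is hypothetical):

	/* Hedged sketch: map a table through its fixmap slot, write, unmap. */
	static void populate_one_pte(pmd_t *pmd, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
	{
		pte_t *ptep = pte_set_fixmap_offset(pmd, addr);	/* map   */

		set_pte(ptep, pfn_pte(pfn, prot));		/* write */
		pte_clear_fixmap();				/* unmap */
	}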
 
index c5534facf9416fefbec606ad8f320f023f662ce7..3c60f37e48ab51998db2c5870fe2df4427949b37 100644 (file)
@@ -28,6 +28,8 @@ struct dyn_arch_ftrace {
 
 extern unsigned long ftrace_graph_call;
 
+extern void return_to_handler(void);
+
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
        /*
index 007a69fc4f408d5f2f7e58f4070b6cb354a5e022..f2585cdd32c29832566718e99d7b5fd9c61d2322 100644 (file)
 "4:    mov     %w0, %w5\n"                                             \
 "      b       3b\n"                                                   \
 "      .popsection\n"                                                  \
-"      .pushsection __ex_table,\"a\"\n"                                \
-"      .align  3\n"                                                    \
-"      .quad   1b, 4b, 2b, 4b\n"                                       \
-"      .popsection\n"                                                  \
+       _ASM_EXTABLE(1b, 4b)                                            \
+       _ASM_EXTABLE(2b, 4b)                                            \
        ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,            \
                    CONFIG_ARM64_PAN)                                   \
        : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)       \
@@ -121,6 +119,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "      prfm    pstl1strm, %2\n"
 "1:    ldxr    %w1, %2\n"
 "      sub     %w3, %w1, %w4\n"
@@ -133,10 +132,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "4:    mov     %w0, %w6\n"
 "      b       3b\n"
 "      .popsection\n"
-"      .pushsection __ex_table,\"a\"\n"
-"      .align  3\n"
-"      .quad   1b, 4b, 2b, 4b\n"
-"      .popsection\n"
+       _ASM_EXTABLE(1b, 4b)
+       _ASM_EXTABLE(2b, 4b)
+ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
        : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
        : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
        : "memory");
index a57601f9d17cdffb1122e2864ae5353273eb59ee..8740297dac775dac5bac2bb9260fca62df7d0fb9 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 5
+#define NR_IPI 6
 
 typedef struct {
        unsigned int __softirq_pending;
index bb4052e85dbac913c1670fd622f66a6c47909fd8..bbc1e35aa6014c8ea83a1c06acbea83da250121d 100644 (file)
@@ -26,36 +26,7 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
        return *ptep;
 }
 
-static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                                  pte_t *ptep, pte_t pte)
-{
-       set_pte_at(mm, addr, ptep, pte);
-}
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
-                                        unsigned long addr, pte_t *ptep)
-{
-       ptep_clear_flush(vma, addr, ptep);
-}
-
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-                                          unsigned long addr, pte_t *ptep)
-{
-       ptep_set_wrprotect(mm, addr, ptep);
-}
 
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                                           unsigned long addr, pte_t *ptep)
-{
-       return ptep_get_and_clear(mm, addr, ptep);
-}
-
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
-                                            unsigned long addr, pte_t *ptep,
-                                            pte_t pte, int dirty)
-{
-       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
 
 static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                          unsigned long addr, unsigned long end,
@@ -97,4 +68,19 @@ static inline void arch_clear_hugepage_flags(struct page *page)
        clear_bit(PG_dcache_clean, &page->flags);
 }
 
+extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+                               struct page *page, int writable);
+#define arch_make_huge_pte arch_make_huge_pte
+extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                           pte_t *ptep, pte_t pte);
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                     unsigned long addr, pte_t *ptep,
+                                     pte_t pte, int dirty);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                    unsigned long addr, pte_t *ptep);
+extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                   unsigned long addr, pte_t *ptep);
+extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                 unsigned long addr, pte_t *ptep);
+
 #endif /* __ASM_HUGETLB_H */
index 8e8d30684392b1065b0c5d1f65e7715e4028331c..b77197d941fc442c4bad04b185d5cf786ca9fea5 100644 (file)
@@ -1,10 +1,45 @@
 #ifndef __ASM_IRQ_H
 #define __ASM_IRQ_H
 
+#define IRQ_STACK_SIZE                 THREAD_SIZE
+#define IRQ_STACK_START_SP             THREAD_START_SP
+
+#ifndef __ASSEMBLER__
+
+#include <linux/percpu.h>
+
 #include <asm-generic/irq.h>
+#include <asm/thread_info.h>
 
 struct pt_regs;
 
+DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+
+/*
+ * The highest address on the stack, and the first to be used. Used to
+ * find the dummy-stack frame put down by el?_irq() in entry.S, which
+ * is structured as follows:
+ *
+ *       ------------
+ *       |          |  <- irq_stack_ptr
+ *   top ------------
+ *       |   x19    | <- irq_stack_ptr - 0x08
+ *       ------------
+ *       |   x29    | <- irq_stack_ptr - 0x10
+ *       ------------
+ *
+ * where x19 holds a copy of the task stack pointer, at which the struct
+ * pt_regs saved by kernel_entry can be found.
+ *
+ */
+#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
+
+/*
+ * The offset from irq_stack_ptr where entry.S will store the original
+ * stack pointer. Used by unwind_frame() and dump_backtrace().
+ */
+#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
+
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 static inline int nr_legacy_irqs(void)
@@ -12,4 +47,14 @@ static inline int nr_legacy_irqs(void)
        return 0;
 }
 
+static inline bool on_irq_stack(unsigned long sp, int cpu)
+{
+       /* variable names the same as kernel/stacktrace.c */
+       unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
+       unsigned long high = low + IRQ_STACK_START_SP;
+
+       return (low <= sp && sp <= high);
+}
+
+#endif /* !__ASSEMBLER__ */
 #endif
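Backtrace code can then pick the right stack bounds for any given sp. A sketch in the spirit of dump_backtrace() (the helper name is invented):

	/* Hedged sketch: top of whichever stack sp currently points into. */
	static unsigned long stack_top_for(unsigned long sp, int cpu)
	{
		if (on_irq_stack(sp, cpu))
			return IRQ_STACK_PTR(cpu);	/* per-cpu IRQ stack */
		return ALIGN(sp, THREAD_SIZE);		/* task stack top    */
	}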
index 2774fa384c47f27b4e936644cef0e980f7fbcfe7..71ad0f93eb7153226a43d78e909d3e7c3690bf92 100644 (file)
@@ -7,13 +7,14 @@
 
 #include <linux/linkage.h>
 #include <asm/memory.h>
+#include <asm/pgtable-types.h>
 
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
  * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
  */
 #define KASAN_SHADOW_START      (VA_START)
-#define KASAN_SHADOW_END        (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
+#define KASAN_SHADOW_END        (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
 
 /*
  * This value is used to map an address to the corresponding shadow
 #define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << (64 - 3)))
 
 void kasan_init(void);
+void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
 
 #else
 static inline void kasan_init(void) { }
+static inline void kasan_copy_shadow(pgd_t *pgdir) { }
 #endif
 
 #endif
index a459714ee29e38fbf81f2061026933736ed1cbfc..5c6375d8528bb8ddd313bfa2911f7a0d77819028 100644 (file)
 #define SWAPPER_MM_MMUFLAGS    (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
 #endif
 
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define ARM64_MEMSTART_ALIGN   SZ_512M
+#else
+#define ARM64_MEMSTART_ALIGN   SZ_1G
+#endif
 
 #endif /* __ASM_KERNEL_PGTABLE_H */
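The alignment constant is consumed when the memblock view of RAM is clipped. A sketch of that step (the wrapper is hypothetical; the round_down() mirrors what the early mm code does with ARM64_MEMSTART_ALIGN):

	/* Hedged sketch: align the base of RAM so block mappings line up. */
	static void __init clip_memstart(void)
	{
		memstart_addr = round_down(memblock_start_of_DRAM(),
					   ARM64_MEMSTART_ALIGN);
	}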
index 5e377101f91948f9ee711bea3f7659c86010301c..419bc6661b5c44198f03a7f2b80c59a3f17e8fb3 100644 (file)
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT    0
 #define KVM_ARM64_DEBUG_DIRTY          (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
 
+#define kvm_ksym_ref(sym)              phys_to_virt((u64)&sym - kimage_voffset)
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
index a35ce7266aac3688fa6460bace61f90477448aa6..90c6368ad7c859bbc101d88f5273d33380c8d35a 100644 (file)
@@ -222,7 +222,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
-u64 kvm_call_hyp(void *hypfn, ...);
+u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
@@ -243,8 +243,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
         * Call initialization code, and switch to the full blown
         * HYP code.
         */
-       kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
-                    hyp_stack_ptr, vector_ptr);
+       __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+                      hyp_stack_ptr, vector_ptr);
 }
 
 static inline void kvm_arch_hardware_disable(void) {}
@@ -258,4 +258,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
 
+#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
 #endif /* __ARM64_KVM_HOST_H__ */
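Every hyp entry point now goes through the wrapper so that EL2 is handed a linear-map address rather than a kernel-image address. A usage sketch:

	/* Hedged sketch: the macro hides the image-to-linear conversion. */
	kvm_call_hyp(__kvm_flush_vm_context);
	/* i.e. __kvm_call_hyp(kvm_ksym_ref(__kvm_flush_vm_context)); */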
index 3de42d68611df91ba6d46e32c197f700bb52bf52..23acc00be32d019a9f0f71b75153b5b32996b083 100644 (file)
@@ -26,6 +26,7 @@ __asm__(".arch_extension      lse");
 
 /* Macro for constructing calls to out-of-line ll/sc atomics */
 #define __LL_SC_CALL(op)       "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
+#define __LL_SC_CLOBBERS       "x16", "x17", "x30"
 
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)                               \
index 853953cd1f0813fd562b68b7cdddd95582f1e392..12f8a00fb3f1767a645a04358dcaca08fd4f6b43 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <linux/types.h>
+#include <asm/bug.h>
 #include <asm/sizes.h>
 
 /*
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
- * The module space lives between the addresses given by TASK_SIZE
- * and PAGE_OFFSET - it must be within 128MB of the kernel text.
  */
 #define VA_BITS                        (CONFIG_ARM64_VA_BITS)
 #define VA_START               (UL(0xffffffffffffffff) << VA_BITS)
 #define PAGE_OFFSET            (UL(0xffffffffffffffff) << (VA_BITS - 1))
-#define MODULES_END            (PAGE_OFFSET)
-#define MODULES_VADDR          (MODULES_END - SZ_64M)
-#define PCI_IO_END             (MODULES_VADDR - SZ_2M)
+#define KIMAGE_VADDR           (MODULES_END)
+#define MODULES_END            (MODULES_VADDR + MODULES_VSIZE)
+#define MODULES_VADDR          (VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VSIZE          (SZ_128M)
+#define PCI_IO_END             (PAGE_OFFSET - SZ_2M)
 #define PCI_IO_START           (PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP            (PCI_IO_START - SZ_2M)
 #define TASK_SIZE_64           (UL(1) << VA_BITS)
 
 #define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 4))
 
+/*
+ * The size of the KASAN shadow region. This should be 1/8th of the
+ * size of the entire kernel virtual address space.
+ */
+#ifdef CONFIG_KASAN
+#define KASAN_SHADOW_SIZE      (UL(1) << (VA_BITS - 3))
+#else
+#define KASAN_SHADOW_SIZE      (0)
+#endif
+
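Put together for a concrete configuration, the new constants lay the kernel half of the address space out as follows (values computed from the definitions above):

	/*
	 * Worked example, VA_BITS = 48, CONFIG_KASAN = y:
	 *
	 *   VA_START          = 0xffff000000000000
	 *   KASAN_SHADOW_SIZE = 1UL << 45		(32 TB of shadow)
	 *   MODULES_VADDR     = 0xffff200000000000	(VA_START + shadow)
	 *   MODULES_END       = MODULES_VADDR + SZ_128M
	 *   KIMAGE_VADDR      = MODULES_END
	 *   PAGE_OFFSET       = 0xffff800000000000	(start of linear map)
	 */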
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
-#define __virt_to_phys(x)      (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
-#define __phys_to_virt(x)      ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
+#define __virt_to_phys(x) ({                                           \
+       phys_addr_t __x = (phys_addr_t)(x);                             \
+       __x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :   \
+                                (__x - kimage_voffset); })
+
+#define __phys_to_virt(x)      ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_kimg(x)      ((unsigned long)((x) + kimage_voffset))
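Linear-map and kernel-image addresses now occupy different halves of the kernel VA range, so the conversion tests one bit to pick the rule. The macro above, written out as a function for readability (a sketch, not a replacement):

	/* Bit VA_BITS-1 set => linear map; clear => kernel-image mapping. */
	static inline phys_addr_t virt_to_phys_sketch(unsigned long x)
	{
		if (x & BIT(VA_BITS - 1))
			return (x & ~PAGE_OFFSET) + PHYS_OFFSET; /* linear */
		return x - kimage_voffset;			 /* image  */
	}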
 
 /*
  * Convert a page to/from a physical address
 #define MT_S2_NORMAL           0xf
 #define MT_S2_DEVICE_nGnRE     0x1
 
+#ifdef CONFIG_ARM64_4K_PAGES
+#define IOREMAP_MAX_ORDER      (PUD_SHIFT)
+#else
+#define IOREMAP_MAX_ORDER      (PMD_SHIFT)
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define __early_init_dt_declare_initrd(__start, __end)                 \
+       do {                                                            \
+               initrd_start = (__start);                               \
+               initrd_end = (__end);                                   \
+       } while (0)
+#endif
+
 #ifndef __ASSEMBLY__
 
-extern phys_addr_t             memstart_addr;
+#include <linux/bitops.h>
+#include <linux/mmdebug.h>
+
+extern s64                     memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
-#define PHYS_OFFSET            ({ memstart_addr; })
+#define PHYS_OFFSET            ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
+
+/* the virtual base of the kernel image (minus TEXT_OFFSET) */
+extern u64                     kimage_vaddr;
+
+/* the offset between the kernel virtual and physical mappings */
+extern u64                     kimage_voffset;
 
 /*
- * The maximum physical address that the linear direct mapping
- * of system RAM can cover. (PAGE_OFFSET can be interpreted as
- * a 2's complement signed quantity and negated to derive the
- * maximum size of the linear mapping.)
+ * Allow all memory at the discovery stage. We will clip it later.
  */
-#define MAX_MEMBLOCK_ADDR      ({ memstart_addr - PAGE_OFFSET - 1; })
+#define MIN_MEMBLOCK_ADDR      0
+#define MAX_MEMBLOCK_ADDR      U64_MAX
 
 /*
  * PFNs are used to describe any physical page; this means
index 24165784b8038b732ea568d1e74fd8c0a699b914..a00f7cf35bbd4d80ce045bfeb0cbb6bd061aeaaa 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 static inline void contextidr_thread_switch(struct task_struct *next)
@@ -48,7 +49,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-       unsigned long ttbr = page_to_phys(empty_zero_page);
+       unsigned long ttbr = virt_to_phys(empty_zero_page);
 
        asm(
        "       msr     ttbr0_el1, %0                   // set TTBR0\n"
@@ -73,7 +74,7 @@ static inline bool __cpu_uses_extended_idmap(void)
 /*
  * Set TCR.T0SZ to its default value (based on VA_BITS)
  */
-static inline void cpu_set_default_tcr_t0sz(void)
+static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 {
        unsigned long tcr;
 
@@ -86,7 +87,62 @@ static inline void cpu_set_default_tcr_t0sz(void)
        "       msr     tcr_el1, %0     ;"
        "       isb"
        : "=&r" (tcr)
-       : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+       : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+#define cpu_set_default_tcr_t0sz()     __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_idmap_tcr_t0sz()       __cpu_set_tcr_t0sz(idmap_t0sz)
+
+/*
+ * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
+ *
+ * The idmap lives in the same VA range as userspace, but uses global entries
+ * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
+ * speculative TLB fetches, we must temporarily install the reserved page
+ * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
+ *
+ * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
+ * which should not be installed in TTBR0_EL1. In this case we can leave the
+ * reserved page tables in place.
+ */
+static inline void cpu_uninstall_idmap(void)
+{
+       struct mm_struct *mm = current->active_mm;
+
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       cpu_set_default_tcr_t0sz();
+
+       if (mm != &init_mm)
+               cpu_switch_mm(mm->pgd, mm);
+}
+
+static inline void cpu_install_idmap(void)
+{
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       cpu_set_idmap_tcr_t0sz();
+
+       cpu_switch_mm(idmap_pg_dir, &init_mm);
+}
+
+/*
+ * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
+ * avoiding the possibility of conflicting TLB entries being allocated.
+ */
+static inline void cpu_replace_ttbr1(pgd_t *pgd)
+{
+       typedef void (ttbr_replace_func)(phys_addr_t);
+       extern ttbr_replace_func idmap_cpu_replace_ttbr1;
+       ttbr_replace_func *replace_phys;
+
+       phys_addr_t pgd_phys = virt_to_phys(pgd);
+
+       replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+
+       cpu_install_idmap();
+       replace_phys(pgd_phys);
+       cpu_uninstall_idmap();
 }
 
 /*
index e80e232b730e2d29f77dba57bfa91ad4439f9781..e12af6754634b3d2aa031ae23ce25228dc766cfb 100644 (file)
 
 #define MODULE_ARCH_VERMAGIC   "aarch64"
 
+#ifdef CONFIG_ARM64_MODULE_PLTS
+struct mod_arch_specific {
+       struct elf64_shdr       *plt;
+       int                     plt_num_entries;
+       int                     plt_max_entries;
+};
+#endif
+
+u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+                         Elf64_Sym *sym);
+
+#ifdef CONFIG_RANDOMIZE_BASE
+extern u64 module_alloc_base;
+#else
+#define module_alloc_base      ((u64)_etext - MODULES_VSIZE)
+#endif
+
 #endif /* __ASM_MODULE_H */
index c15053902942e0a3a34ba4882545c5b6d163c23c..ff98585d085aa5737c9c17478555f22b11261f2f 100644 (file)
@@ -42,11 +42,20 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        free_page((unsigned long)pmd);
 }
 
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
 {
-       set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+       set_pud(pud, __pud(pmd | prot));
 }
 
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+}
+#else
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+{
+       BUILD_BUG();
+}
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -62,11 +71,20 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
        free_page((unsigned long)pud);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
 {
-       set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
+       set_pgd(pgdp, __pgd(pud | prot));
 }
 
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+       __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+}
+#else
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+{
+       BUILD_BUG();
+}
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
index b9da9545b442cecde8119f1d2f10625483c575e0..9786f770088df41e919921b3a18024f045bfd707 100644 (file)
 /*
  * Contiguous page definitions.
  */
-#define CONT_PTES              (_AC(1, UL) << CONT_SHIFT)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define CONT_PTE_SHIFT         5
+#define CONT_PMD_SHIFT         5
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define CONT_PTE_SHIFT         7
+#define CONT_PMD_SHIFT         5
+#else
+#define CONT_PTE_SHIFT         4
+#define CONT_PMD_SHIFT         4
+#endif
+
+#define CONT_PTES              (1 << CONT_PTE_SHIFT)
+#define CONT_PTE_SIZE          (CONT_PTES * PAGE_SIZE)
+#define CONT_PTE_MASK          (~(CONT_PTE_SIZE - 1))
+#define CONT_PMDS              (1 << CONT_PMD_SHIFT)
+#define CONT_PMD_SIZE          (CONT_PMDS * PMD_SIZE)
+#define CONT_PMD_MASK          (~(CONT_PMD_SIZE - 1))
 /* the numerical offset of the PTE within a range of CONT_PTES */
 #define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))
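Concretely, with a 4K granule the shifts above work out to (derived from the definitions, for orientation):

	/*
	 * Worked example, 4K pages:
	 *   CONT_PTES = 1 << 4 = 16 PTEs  ->  CONT_PTE_SIZE = 64K per span
	 *   CONT_PMDS = 1 << 4 = 16 PMDs  ->  CONT_PMD_SIZE = 32M per span
	 */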
 
index 67c2ad6d33b7b91914ef56af3ca04f67ffb1b48c..9a09ccf7122dc529c5e508b341f268596b614501 100644 (file)
  *
 * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
  *     (rounded up to PUD_SIZE).
- * VMALLOC_START: beginning of the kernel VA space
+ * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
  *     fixed mappings and modules
  */
 #define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
-#ifndef CONFIG_KASAN
-#define VMALLOC_START          (VA_START)
-#else
-#include <asm/kasan.h>
-#define VMALLOC_START          (KASAN_SHADOW_END + SZ_64K)
-#endif
-
+#define VMALLOC_START          (MODULES_END)
 #define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define VMEMMAP_START          (VMALLOC_END + SZ_64K)
@@ -59,6 +53,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/fixmap.h>
 #include <linux/mmdebug.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
@@ -123,8 +118,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr)       (empty_zero_page)
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr)       virt_to_page(empty_zero_page)
 
 #define pte_ERROR(pte)         __pte_error(__FILE__, __LINE__, pte_val(pte))
 
@@ -136,16 +131,6 @@ extern struct page *empty_zero_page;
 #define pte_clear(mm,addr,ptep)        set_pte(ptep, __pte(0))
 #define pte_page(pte)          (pfn_to_page(pte_pfn(pte)))
 
-/* Find an entry in the third-level page table. */
-#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(dir,addr)    (pmd_page_vaddr(*(dir)) + pte_index(addr))
-
-#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte)                 do { } while (0)
-#define pte_unmap_nested(pte)          do { } while (0)
-
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
@@ -168,6 +153,16 @@ extern struct page *empty_zero_page;
 #define pte_valid(pte)         (!!(pte_val(pte) & PTE_VALID))
 #define pte_valid_not_user(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+#define pte_valid_young(pte) \
+       ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+
+/*
+ * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
+ * so that we don't erroneously return false for pages that have been
+ * remapped as PROT_NONE but are yet to be flushed from the TLB.
+ */
+#define pte_accessible(mm, pte)        \
+       (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
 
 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
@@ -218,7 +213,8 @@ static inline pte_t pte_mkspecial(pte_t pte)
 
 static inline pte_t pte_mkcont(pte_t pte)
 {
-       return set_pte_bit(pte, __pgprot(PTE_CONT));
+       pte = set_pte_bit(pte, __pgprot(PTE_CONT));
+       return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
 }
 
 static inline pte_t pte_mknoncont(pte_t pte)
@@ -226,6 +222,11 @@ static inline pte_t pte_mknoncont(pte_t pte)
        return clear_pte_bit(pte, __pgprot(PTE_CONT));
 }
 
+static inline pmd_t pmd_mkcont(pmd_t pmd)
+{
+       return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
+}
+
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
        *ptep = pte;
@@ -299,7 +300,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 /*
  * Hugetlb definitions.
  */
-#define HUGE_MAX_HSTATE                2
+#define HUGE_MAX_HSTATE                4
 #define HPAGE_SHIFT            PMD_SHIFT
 #define HPAGE_SIZE             (_AC(1, UL) << HPAGE_SHIFT)
 #define HPAGE_MASK             (~(HPAGE_SIZE - 1))
@@ -354,6 +355,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 #define pmd_mksplitting(pmd)   pte_pmd(pte_mkspecial(pmd_pte(pmd)))
 #define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkclean(pmd)       pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mknotpresent(pmd)  (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
@@ -425,13 +427,31 @@ static inline void pmd_clear(pmd_t *pmdp)
        set_pmd(pmdp, __pmd(0));
 }
 
-static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 {
-       return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+       return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
+/* Find an entry in the third-level page table. */
+#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_phys(dir,addr)      (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_kernel(dir,addr)    ((pte_t *)__va(pte_offset_phys((dir), (addr))))
+
+#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)                 do { } while (0)
+#define pte_unmap_nested(pte)          do { } while (0)
+
+#define pte_set_fixmap(addr)           ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+#define pte_set_fixmap_offset(pmd, addr)       pte_set_fixmap(pte_offset_phys(pmd, addr))
+#define pte_clear_fixmap()             clear_fixmap(FIX_PTE)
+
 #define pmd_page(pmd)          pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pte_offset_kimg(dir,addr)      ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -458,21 +478,37 @@ static inline void pud_clear(pud_t *pudp)
        set_pud(pudp, __pud(0));
 }
 
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline phys_addr_t pud_page_paddr(pud_t pud)
 {
-       return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+       return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)                (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-       return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
+#define pmd_offset_phys(dir, addr)     (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset(dir, addr)          ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
+
+#define pmd_set_fixmap(addr)           ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
+#define pmd_set_fixmap_offset(pud, addr)       pmd_set_fixmap(pmd_offset_phys(pud, addr))
+#define pmd_clear_fixmap()             clear_fixmap(FIX_PMD)
 
 #define pud_page(pud)          pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pmd_offset_kimg(dir,addr)      ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
+
+#else
+
+#define pud_page_paddr(pud)    ({ BUILD_BUG(); 0; })
+
+/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
+#define pmd_set_fixmap(addr)           NULL
+#define pmd_set_fixmap_offset(pudp, addr)      ((pmd_t *)pudp)
+#define pmd_clear_fixmap()
+
+#define pmd_offset_kimg(dir,addr)      ((pmd_t *)dir)
+
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -494,21 +530,37 @@ static inline void pgd_clear(pgd_t *pgdp)
        set_pgd(pgdp, __pgd(0));
 }
 
-static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 {
-       return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+       return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
 /* Find an entry in the first-level page table. */
 #define pud_index(addr)                (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
-{
-       return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
-}
+#define pud_offset_phys(dir, addr)     (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset(dir, addr)          ((pud_t *)__va(pud_offset_phys((dir), (addr))))
+
+#define pud_set_fixmap(addr)           ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
+#define pud_set_fixmap_offset(pgd, addr)       pud_set_fixmap(pud_offset_phys(pgd, addr))
+#define pud_clear_fixmap()             clear_fixmap(FIX_PUD)
 
 #define pgd_page(pgd)          pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pud_offset_kimg(dir,addr)      ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
+
+#else
+
+#define pgd_page_paddr(pgd)    ({ BUILD_BUG(); 0; })
+
+/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
+#define pud_set_fixmap(addr)           NULL
+#define pud_set_fixmap_offset(pgdp, addr)      ((pud_t *)pgdp)
+#define pud_clear_fixmap()
+
+#define pud_offset_kimg(dir,addr)      ((pud_t *)dir)
+
 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 
 #define pgd_ERROR(pgd)         __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -516,11 +568,16 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)                (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 
-#define pgd_offset(mm, addr)   ((mm)->pgd+pgd_index(addr))
+#define pgd_offset_raw(pgd, addr)      ((pgd) + pgd_index(addr))
+
+#define pgd_offset(mm, addr)   (pgd_offset_raw((mm)->pgd, (addr)))
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 
+#define pgd_set_fixmap(addr)   ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
+#define pgd_clear_fixmap()     clear_fixmap(FIX_PGD)
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -681,7 +738,8 @@ extern int kern_addr_valid(unsigned long addr);
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init() do { } while (0)
+void pgd_cache_init(void);
+#define pgtable_cache_init     pgd_cache_init
 
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
index 4acb7ca94fcd9c05569f3103ab09097c19ea72d5..cef1cf398356f1f61ceea854fc564f6df1d316cc 100644 (file)
 
 #include <linux/string.h>
 
+#include <asm/alternative.h>
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/lse.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
@@ -177,9 +179,11 @@ static inline void prefetchw(const void *ptr)
 }
 
 #define ARCH_HAS_SPINLOCK_PREFETCH
-static inline void spin_lock_prefetch(const void *x)
+static inline void spin_lock_prefetch(const void *ptr)
 {
-       prefetchw(x);
+       asm volatile(ARM64_LSE_ATOMIC_INSN(
+                    "prfm pstl1strm, %a0",
+                    "nop") : : "p" (ptr));
 }
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -187,5 +191,6 @@ static inline void spin_lock_prefetch(const void *x)
 #endif
 
 void cpu_enable_pan(void *__unused);
+void cpu_enable_uao(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
index 4df608a8459e27c657055edd5ca519b34e755b80..e368a55ebd22d0c3dab4a690afdd33d62c76ef73 100644 (file)
@@ -21,7 +21,7 @@
  * alignment value. Since we don't have aliasing D-caches, the rest of
  * the time we can safely use PAGE_SIZE.
  */
-#define COMPAT_SHMLBA  0x4000
+#define COMPAT_SHMLBA  (4 * PAGE_SIZE)
 
 #include <asm-generic/shmparam.h>
 
index d9c3d6a6100ac5d68e9b412113daccd1e43d8371..2013a4dc5124a55c5c304306b41908f16a0e5d64 100644 (file)
@@ -64,6 +64,15 @@ extern void secondary_entry(void);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+#else
+static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+       BUILD_BUG();
+}
+#endif
+
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
index c85e96d174a5fbd4764adb748b9bc11c70479617..fc9682bfe0020caebf99412131fb760d5b8b870d 100644 (file)
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
  */
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       unsigned int tmp;
+       arch_spinlock_t lockval;
 
-#define arch_spin_unlock_wait(lock) \
-       do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+       asm volatile(
+"      sevl\n"
+"1:    wfe\n"
+"2:    ldaxr   %w0, %2\n"
+"      eor     %w1, %w0, %w0, ror #16\n"
+"      cbnz    %w1, 1b\n"
+       ARM64_LSE_ATOMIC_INSN(
+       /* LL/SC */
+"      stxr    %w1, %w0, %2\n"
+"      cbnz    %w1, 2b\n", /* Serialise against any concurrent lockers */
+       /* LSE atomics */
+"      nop\n"
+"      nop\n")
+       : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+       :
+       : "memory");
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
index 7318f6d54aa949ca906a990cce357a4cdd1358db..801a16dbbdf622d5239cf61be4f67eccab5c1e09 100644 (file)
 #ifndef __ASM_STACKTRACE_H
 #define __ASM_STACKTRACE_H
 
+struct task_struct;
+
 struct stackframe {
        unsigned long fp;
        unsigned long sp;
        unsigned long pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       unsigned int graph;
+#endif
 };
 
-extern int unwind_frame(struct stackframe *frame);
-extern void walk_stackframe(struct stackframe *frame,
+extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
+extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                            int (*fn)(struct stackframe *, void *), void *data);
 
 #endif /* __ASM_STACKTRACE_H */
index d48ab5b41f521c23819c0b3927a9ea0f73db242c..b9fd8ec790336569ee8fdedb4573e25b3045c29b 100644 (file)
 
 #define SYS_ID_AA64MMFR0_EL1           sys_reg(3, 0, 0, 7, 0)
 #define SYS_ID_AA64MMFR1_EL1           sys_reg(3, 0, 0, 7, 1)
+#define SYS_ID_AA64MMFR2_EL1           sys_reg(3, 0, 0, 7, 2)
 
 #define SYS_CNTFRQ_EL0                 sys_reg(3, 3, 14, 0, 0)
 #define SYS_CTR_EL0                    sys_reg(3, 3, 0, 0, 1)
 #define SYS_DCZID_EL0                  sys_reg(3, 3, 0, 0, 7)
 
 #define REG_PSTATE_PAN_IMM             sys_reg(0, 0, 4, 0, 4)
+#define REG_PSTATE_UAO_IMM             sys_reg(0, 0, 4, 0, 3)
 
 #define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
                                     (!!x)<<8 | 0x1f)
+#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
+                                    (!!x)<<8 | 0x1f)
 
 /* SCTLR_EL1 */
 #define SCTLR_EL1_CP15BEN      (0x1 << 5)
 #define ID_AA64MMFR1_VMIDBITS_SHIFT    4
 #define ID_AA64MMFR1_HADBS_SHIFT       0
 
+/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_UAO_SHIFT         4
+
 /* id_aa64dfr0 */
 #define ID_AA64DFR0_CTX_CMPS_SHIFT     28
 #define ID_AA64DFR0_WRPS_SHIFT         20
 #ifdef __ASSEMBLY__
 
        .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
-       .equ    __reg_num_x\num, \num
+       .equ    .L__reg_num_x\num, \num
        .endr
-       .equ    __reg_num_xzr, 31
+       .equ    .L__reg_num_xzr, 31
 
        .macro  mrs_s, rt, sreg
-       .inst   0xd5200000|(\sreg)|(__reg_num_\rt)
+       .inst   0xd5200000|(\sreg)|(.L__reg_num_\rt)
        .endm
 
        .macro  msr_s, sreg, rt
-       .inst   0xd5000000|(\sreg)|(__reg_num_\rt)
+       .inst   0xd5000000|(\sreg)|(.L__reg_num_\rt)
        .endm
 
 #else
 
 asm(
 "      .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
-"      .equ    __reg_num_x\\num, \\num\n"
+"      .equ    .L__reg_num_x\\num, \\num\n"
 "      .endr\n"
-"      .equ    __reg_num_xzr, 31\n"
+"      .equ    .L__reg_num_xzr, 31\n"
 "\n"
 "      .macro  mrs_s, rt, sreg\n"
-"      .inst   0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
+"      .inst   0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n"
 "      .endm\n"
 "\n"
 "      .macro  msr_s, sreg, rt\n"
-"      .inst   0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
+"      .inst   0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n"
 "      .endm\n"
 );
 
index 90c7ff233735d7691bf3b34b7075dffd07dbf939..abd64bd1f6d9f0160a3122555cf23be1a30f87eb 100644 (file)
@@ -73,10 +73,16 @@ register unsigned long current_stack_pointer asm ("sp");
  */
 static inline struct thread_info *current_thread_info(void) __attribute_const__;
 
+/*
+ * struct thread_info can be accessed directly via sp_el0.
+ */
 static inline struct thread_info *current_thread_info(void)
 {
-       return (struct thread_info *)
-               (current_stack_pointer & ~(THREAD_SIZE - 1));
+       unsigned long sp_el0;
+
+       asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+       return (struct thread_info *)sp_el0;
 }
 
 #define thread_saved_pc(tsk)   \
index b2ede967fe7d49258c56af51e73786efb5d1a420..0685d74572af788b05d44658c7dba7e4fc3742ba 100644 (file)
 #define VERIFY_WRITE 1
 
 /*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
  *
  * All the routines below use bits of fixup code that are out of line
  * with the main instruction path.  This means when everything is well,
 
 struct exception_table_entry
 {
-       unsigned long insn, fixup;
+       int insn, fixup;
 };
 
+#define ARCH_HAS_RELATIVE_EXTABLE
+
 extern int fixup_exception(struct pt_regs *regs);
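 
 With ARCH_HAS_RELATIVE_EXTABLE each entry is a pair of 32-bit self-relative
 offsets, halving the table and keeping it valid under KASLR relocation. A
 minimal sketch of recovering the absolute addresses (helper names are
 illustrative; the arithmetic matches the "(from - .)" encoding emitted by
 _ASM_EXTABLE below):
 
	static inline unsigned long ex_insn_addr(const struct exception_table_entry *e)
	{
		return (unsigned long)&e->insn + e->insn;	/* base + offset */
	}

	static inline unsigned long ex_fixup_addr(const struct exception_table_entry *e)
	{
		return (unsigned long)&e->fixup + e->fixup;
	}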
 
 #define KERNEL_DS      (-1UL)
@@ -64,6 +66,16 @@ extern int fixup_exception(struct pt_regs *regs);
 static inline void set_fs(mm_segment_t fs)
 {
        current_thread_info()->addr_limit = fs;
+
+       /*
+        * Enable/disable UAO so that copy_to_user() etc can access
+        * kernel memory with the unprivileged instructions.
+        */
+       if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
+               asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
+       else
+               asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
+                               CONFIG_ARM64_UAO));
 }
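 
 /* Net effect: with fs == KERNEL_DS on a UAO-capable CPU, PSTATE.UAO is set
  * so the unprivileged ldtr/sttr forms used by the uaccess routines below
  * act as privileged accesses and may reach kernel memory; for USER_DS it
  * is cleared so they stay properly unprivileged. */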
 
 #define segment_eq(a, b)       ((a) == (b))
@@ -105,6 +117,12 @@ static inline void set_fs(mm_segment_t fs)
 #define access_ok(type, addr, size)    __range_ok(addr, size)
 #define user_addr_max                  get_fs
 
+#define _ASM_EXTABLE(from, to)                                         \
+       "       .pushsection    __ex_table, \"a\"\n"                    \
+       "       .align          3\n"                                    \
+       "       .long           (" #from " - .), (" #to " - .)\n"       \
+       "       .popsection\n"
+
 /*
  * The "__xxx" versions of the user access functions do not verify the address
  * space - it must have been done previously with a separate "access_ok()"
@@ -113,9 +131,10 @@ static inline void set_fs(mm_segment_t fs)
  * The "__xxx_error" versions set the third argument to -EFAULT if an error
  * occurs, and leave it unchanged on success.
  */
-#define __get_user_asm(instr, reg, x, addr, err)                       \
+#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)   \
        asm volatile(                                                   \
-       "1:     " instr "       " reg "1, [%2]\n"                       \
+       "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
+                       alt_instr " " reg "1, [%2]\n", feature)         \
        "2:\n"                                                          \
        "       .section .fixup, \"ax\"\n"                              \
        "       .align  2\n"                                            \
@@ -123,10 +142,7 @@ static inline void set_fs(mm_segment_t fs)
        "       mov     %1, #0\n"                                       \
        "       b       2b\n"                                           \
        "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .quad   1b, 3b\n"                                       \
-       "       .previous"                                              \
+       _ASM_EXTABLE(1b, 3b)                                            \
        : "+r" (err), "=&r" (x)                                         \
        : "r" (addr), "i" (-EFAULT))
 
@@ -134,26 +150,30 @@ static inline void set_fs(mm_segment_t fs)
 do {                                                                   \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,        \
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
                        CONFIG_ARM64_PAN));                             \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));   \
+               __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
+                              (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm("ldrh", "%w", __gu_val, (ptr), (err));   \
+               __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),  \
+                              (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm("ldr", "%w", __gu_val, (ptr), (err));    \
+               __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),    \
+                              (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 8:                                                         \
-               __get_user_asm("ldr", "%",  __gu_val, (ptr), (err));    \
+               __get_user_asm("ldr", "ldtr", "%",  __gu_val, (ptr),    \
+                              (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,        \
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
                        CONFIG_ARM64_PAN));                             \
 } while (0)
 
@@ -181,19 +201,17 @@ do {                                                                      \
                ((x) = 0, -EFAULT);                                     \
 })
 
-#define __put_user_asm(instr, reg, x, addr, err)                       \
+#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)   \
        asm volatile(                                                   \
-       "1:     " instr "       " reg "1, [%2]\n"                       \
+       "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
+                       alt_instr " " reg "1, [%2]\n", feature)         \
        "2:\n"                                                          \
        "       .section .fixup,\"ax\"\n"                               \
        "       .align  2\n"                                            \
        "3:     mov     %w0, %3\n"                                      \
        "       b       2b\n"                                           \
        "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .quad   1b, 3b\n"                                       \
-       "       .previous"                                              \
+       _ASM_EXTABLE(1b, 3b)                                            \
        : "+r" (err)                                                    \
        : "r" (x), "r" (addr), "i" (-EFAULT))
 
@@ -201,25 +219,29 @@ do {                                                                      \
 do {                                                                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,        \
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
                        CONFIG_ARM64_PAN));                             \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __put_user_asm("strb", "%w", __pu_val, (ptr), (err));   \
+               __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),  \
+                              (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 2:                                                         \
-               __put_user_asm("strh", "%w", __pu_val, (ptr), (err));   \
+               __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),  \
+                              (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm("str",  "%w", __pu_val, (ptr), (err));   \
+               __put_user_asm("str", "sttr", "%w", __pu_val, (ptr),    \
+                              (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 8:                                                         \
-               __put_user_asm("str",  "%", __pu_val, (ptr), (err));    \
+               __put_user_asm("str", "sttr", "%", __pu_val, (ptr),     \
+                              (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,        \
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
                        CONFIG_ARM64_PAN));                             \
 } while (0)
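 
 Call sites are unaffected by this rework; an illustrative (non-kernel)
 sketch, assuming the pointer has already been range-checked with
 access_ok():
 
	static int fetch_byte(const unsigned char __user *uptr, unsigned char *out)
	{
		return __get_user(*out, uptr);	/* 0 on success, -EFAULT on fault */
	}

	static int store_byte(unsigned char __user *uptr, unsigned char val)
	{
		return __put_user(val, uptr);
	}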
 
index aab5bf09e9d902f7bdf3d09e61fcd97db748c505..2b79b8a89457bd70e20e24ba33c8029195e3284c 100644 (file)
@@ -16,6 +16,8 @@
 #ifndef __ASM_WORD_AT_A_TIME_H
 #define __ASM_WORD_AT_A_TIME_H
 
+#include <asm/uaccess.h>
+
 #ifndef __AARCH64EB__
 
 #include <linux/kernel.h>
@@ -81,10 +83,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
 #endif
        "       b       2b\n"
        "       .popsection\n"
-       "       .pushsection __ex_table,\"a\"\n"
-       "       .align  3\n"
-       "       .quad   1b, 3b\n"
-       "       .popsection"
+       _ASM_EXTABLE(1b, 3b)
        : "=&r" (ret), "=&r" (offset)
        : "r" (addr), "Q" (*(unsigned long *)addr));
 
index 208db3df135a48d1b7118da22697ad24cd75706e..b5c3933ed44163b2fb489a00553195afc1ff6806 100644 (file)
@@ -45,6 +45,7 @@
 #define PSR_A_BIT      0x00000100
 #define PSR_D_BIT      0x00000200
 #define PSR_PAN_BIT    0x00400000
+#define PSR_UAO_BIT    0x00800000
 #define PSR_Q_BIT      0x08000000
 #define PSR_V_BIT      0x10000000
 #define PSR_C_BIT      0x20000000
index 474691f8b13ab893cf403b8c1737a91aeff87bc0..49a2430b0786e6df6e8bd39a3d12811783eead7d 100644 (file)
@@ -30,6 +30,7 @@ arm64-obj-$(CONFIG_COMPAT)            += sys32.o kuser32.o signal32.o         \
                                           ../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)    += ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)            += arm64ksyms.o module.o
+arm64-obj-$(CONFIG_ARM64_MODULE_PLTS)  += module-plts.o
 arm64-obj-$(CONFIG_PERF_EVENTS)                += perf_regs.o perf_callchain.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)     += perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
@@ -41,6 +42,8 @@ arm64-obj-$(CONFIG_EFI)                       += efi.o efi-entry.stub.o
 arm64-obj-$(CONFIG_PCI)                        += pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)   += armv8_deprecated.o
 arm64-obj-$(CONFIG_ACPI)               += acpi.o
+arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)        += acpi_parking_protocol.o
+arm64-obj-$(CONFIG_RANDOMIZE_BASE)     += kaslr.o
 
 obj-y                                  += $(arm64-obj-y) vdso/
 obj-m                                  += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
new file mode 100644 (file)
index 0000000..4b1e5a7
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * ARM64 ACPI Parking Protocol implementation
+ *
+ * Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *         Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+#include <asm/cpu_ops.h>
+
+struct cpu_mailbox_entry {
+       phys_addr_t mailbox_addr;
+       u8 version;
+       u8 gic_cpu_id;
+};
+
+static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS];
+
+void __init acpi_set_mailbox_entry(int cpu,
+                                  struct acpi_madt_generic_interrupt *p)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+       cpu_entry->mailbox_addr = p->parked_address;
+       cpu_entry->version = p->parking_version;
+       cpu_entry->gic_cpu_id = p->cpu_interface_number;
+}
+
+bool acpi_parking_protocol_valid(int cpu)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+       return cpu_entry->mailbox_addr && cpu_entry->version;
+}
+
+static int acpi_parking_protocol_cpu_init(unsigned int cpu)
+{
+       pr_debug("%s: ACPI parked addr=%llx\n", __func__,
+                 cpu_mailbox_entries[cpu].mailbox_addr);
+
+       return 0;
+}
+
+static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
+{
+       return 0;
+}
+
+struct parking_protocol_mailbox {
+       __le32 cpu_id;
+       __le32 reserved;
+       __le64 entry_point;
+};
+
+static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+       struct parking_protocol_mailbox __iomem *mailbox;
+       __le32 cpu_id;
+
+       /*
+        * Map the mailbox memory with Device nGnRE attributes (i.e. via
+        * ioremap). This deviates from the parking protocol specification,
+        * which requires the mailboxes to be mapped nGnRnE, but the
+        * attribute discrepancy is harmless as far as the protocol is
+        * concerned.
+        * If the mailbox was mistakenly allocated in the linear mapping
+        * by FW, ioremap will fail, since the kernel prevents such a
+        * mapping (it would clash with the linear mapping attributes).
+        */
+       mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+       if (!mailbox)
+               return -EIO;
+
+       cpu_id = readl_relaxed(&mailbox->cpu_id);
+       /*
+        * Check if firmware has set up the mailbox entry properly
+        * before kickstarting the respective cpu.
+        */
+       if (cpu_id != ~0U) {
+               iounmap(mailbox);
+               return -ENXIO;
+       }
+
+       /*
+        * We write the entry point and cpu id as LE regardless of the
+        * native endianness of the kernel. Therefore, any boot-loader
+        * that reads these fields must convert them to its own
+        * endianness before jumping to the entry point.
+        */
+       writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+       writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
+
+       arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+       iounmap(mailbox);
+
+       return 0;
+}
+
+static void acpi_parking_protocol_cpu_postboot(void)
+{
+       int cpu = smp_processor_id();
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+       struct parking_protocol_mailbox __iomem *mailbox;
+       __le64 entry_point;
+
+       /*
+        * Map the mailbox memory with Device nGnRE attributes (i.e. via
+        * ioremap). This deviates from the parking protocol specification,
+        * which requires the mailboxes to be mapped nGnRnE, but the
+        * attribute discrepancy is harmless as far as the protocol is
+        * concerned.
+        * If the mailbox was mistakenly allocated in the linear mapping
+        * by FW, ioremap will fail, since the kernel prevents such a
+        * mapping (it would clash with the linear mapping attributes).
+        */
+       mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+       if (!mailbox)
+               return;
+
+       entry_point = readq_relaxed(&mailbox->entry_point);
+       /*
+        * Check if firmware has cleared the entry_point as expected
+        * by the protocol specification.
+        */
+       WARN_ON(entry_point);
+
+       iounmap(mailbox);
+}
+
+const struct cpu_operations acpi_parking_protocol_ops = {
+       .name           = "parking-protocol",
+       .cpu_init       = acpi_parking_protocol_cpu_init,
+       .cpu_prepare    = acpi_parking_protocol_cpu_prepare,
+       .cpu_boot       = acpi_parking_protocol_cpu_boot,
+       .cpu_postboot   = acpi_parking_protocol_cpu_postboot
+};
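+
+For orientation, the firmware side of the handshake implied by the checks
+above: each parked CPU publishes ~0 as its mailbox cpu_id, spins until the
+OS writes back its GIC CPU id, then jumps to the little-endian entry point.
+A purely illustrative sketch -- the helpers and control flow here are
+assumptions, not part of this patch:
+
+	static void fw_park_cpu(volatile struct parking_protocol_mailbox *mb,
+				uint32_t my_gic_id)
+	{
+		mb->cpu_id = cpu_to_le32(~0U);		/* "not booted" marker */
+		while (le32_to_cpu(mb->cpu_id) != my_gic_id)
+			wfi();				/* wait for the wakeup IPI */
+		((void (*)(void))le64_to_cpu(mb->entry_point))();
+	}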
index ab9db0e9818c0caa52aa040b7c01aa83183d774e..d2ee1b21a10ddd1bcce1718210c6ce7b7725cfe3 100644 (file)
@@ -158,9 +158,3 @@ void apply_alternatives(void *start, size_t length)
 
        __apply_alternatives(&region);
 }
-
-void free_alternatives_memory(void)
-{
-       free_reserved_area(__alt_instructions, __alt_instructions_end,
-                          0, "alternatives");
-}
index 937f5e58a4d340a27234c76b5a84fedcf9aa6373..c37202c0c838d01a71d56b05d114cd0b419ff480 100644 (file)
@@ -62,7 +62,7 @@ struct insn_emulation {
 };
 
 static LIST_HEAD(insn_emulation);
-static int nr_insn_emulated;
+static int nr_insn_emulated __initdata;
 static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
 
 static void register_emulation_hooks(struct insn_emulation_ops *ops)
@@ -173,7 +173,7 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
        return ret;
 }
 
-static void register_insn_emulation(struct insn_emulation_ops *ops)
+static void __init register_insn_emulation(struct insn_emulation_ops *ops)
 {
        unsigned long flags;
        struct insn_emulation *insn;
@@ -237,7 +237,7 @@ static struct ctl_table ctl_abi[] = {
        { }
 };
 
-static void register_insn_emulation_sysctl(struct ctl_table *table)
+static void __init register_insn_emulation_sysctl(struct ctl_table *table)
 {
        unsigned long flags;
        int i = 0;
@@ -297,11 +297,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
        "4:     mov             %w0, %w5\n"                     \
        "       b               3b\n"                           \
        "       .popsection"                                    \
-       "       .pushsection     __ex_table,\"a\"\n"            \
-       "       .align          3\n"                            \
-       "       .quad           0b, 4b\n"                       \
-       "       .quad           1b, 4b\n"                       \
-       "       .popsection\n"                                  \
+       _ASM_EXTABLE(0b, 4b)                                    \
+       _ASM_EXTABLE(1b, 4b)                                    \
        ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,    \
                CONFIG_ARM64_PAN)                               \
        : "=&r" (res), "+r" (data), "=&r" (temp)                \
index feb6b4efa6414846d5598ccb0913a544ba0cf441..e6bc988e8dbf0f69fc4b1a48f9a7b4a89ee713f3 100644 (file)
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
 
-#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
-#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-#define MIDR_THUNDERX  MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
-
-#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
-                       MIDR_ARCHITECTURE_MASK)
-
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
 {
-       u32 midr = read_cpuid_id();
-
-       if ((midr & CPU_MODEL_MASK) != entry->midr_model)
-               return false;
-
-       midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-
-       return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
+       return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
+                                      entry->midr_range_min,
+                                      entry->midr_range_max);
 }
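 
 MIDR_IS_CPU_MODEL_RANGE (moved into a header, not shown in this hunk)
 encapsulates exactly the open-coded check being removed; restated in C
 from the deleted lines:
 
	static bool midr_in_model_range(u32 midr, u32 model, u32 rv_min, u32 rv_max)
	{
		u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);

		if ((midr & CPU_MODEL_MASK) != model)	/* implementer/part/arch */
			return false;
		return rv >= rv_min && rv <= rv_max;	/* variant:revision window */
	}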
 
 #define MIDR_RANGE(model, min, max) \
index b6bd7d4477683393fb34dc6b055b07382e8ab050..c7cfb8fe06f94c7f5113abf0cb624980e4227127 100644 (file)
 #include <asm/smp_plat.h>
 
 extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations acpi_parking_protocol_ops;
 extern const struct cpu_operations cpu_psci_ops;
 
 const struct cpu_operations *cpu_ops[NR_CPUS];
 
-static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
        &smp_spin_table_ops,
        &cpu_psci_ops,
        NULL,
 };
 
+static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+       &acpi_parking_protocol_ops,
+#endif
+       &cpu_psci_ops,
+       NULL,
+};
+
 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-       const struct cpu_operations **ops = supported_cpu_ops;
+       const struct cpu_operations **ops;
+
+       ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
 
        while (*ops) {
                if (!strcmp(name, (*ops)->name))
@@ -75,8 +86,16 @@ static const char *__init cpu_read_enable_method(int cpu)
                }
        } else {
                enable_method = acpi_get_enable_method(cpu);
-               if (!enable_method)
-                       pr_err("Unsupported ACPI enable-method\n");
+               if (!enable_method) {
+                       /*
+                        * On ACPI systems the boot CPU does not require
+                        * an enable method, since for some boot protocols
+                        * (e.g. the parking protocol) it need not be
+                        * initialized. Don't warn spuriously.
+                        */
+                       if (cpu != 0)
+                               pr_err("Unsupported ACPI enable-method\n");
+               }
        }
 
        return enable_method;
index 0669c63281ea01a93ef9794f9731b424b6afd28e..7566cad9fa1da5a882ada85b5801a8ae33f4da0c 100644 (file)
@@ -67,6 +67,10 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
                .width = 0,                             \
        }
 
+/* meta feature for alternatives */
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
+
 static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
@@ -123,6 +127,11 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
        ARM64_FTR_END,
 };
 
+static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
 static struct arm64_ftr_bits ftr_ctr[] = {
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),      /* RAO */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
@@ -284,6 +293,7 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = {
        /* Op1 = 0, CRn = 0, CRm = 7 */
        ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
        ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+       ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
 
        /* Op1 = 3, CRn = 0, CRm = 0 */
        ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
@@ -408,6 +418,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
        init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
        init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
        init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
        init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
        init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
        init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
@@ -517,6 +528,8 @@ void update_cpu_features(int cpu,
                                      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
        taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
                                      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
+       taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+                                     info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
 
        /*
         * EL3 is not our concern.
@@ -621,6 +634,18 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
        return has_sre;
 }
 
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
+{
+       u32 midr = read_cpuid_id();
+       u32 rv_min, rv_max;
+
+       /* Cavium ThunderX pass 1.x and 2.x */
+       rv_min = 0;
+       rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
+
+       return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
+}
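+
+/* Here rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK encodes
+ * variant 1 at the maximum revision, so the [rv_min, rv_max] window spans
+ * variants 0 and 1 at any revision -- i.e. ThunderX pass 1.x and 2.x, as
+ * the comment notes. */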
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -651,6 +676,28 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .min_field_value = 2,
        },
 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+       {
+               .desc = "Software prefetching using PRFM",
+               .capability = ARM64_HAS_NO_HW_PREFETCH,
+               .matches = has_no_hw_prefetch,
+       },
+#ifdef CONFIG_ARM64_UAO
+       {
+               .desc = "User Access Override",
+               .capability = ARM64_HAS_UAO,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR2_EL1,
+               .field_pos = ID_AA64MMFR2_UAO_SHIFT,
+               .min_field_value = 1,
+               .enable = cpu_enable_uao,
+       },
+#endif /* CONFIG_ARM64_UAO */
+#ifdef CONFIG_ARM64_PAN
+       {
+               .capability = ARM64_ALT_PAN_NOT_UAO,
+               .matches = cpufeature_pan_not_uao,
+       },
+#endif /* CONFIG_ARM64_PAN */
        {},
 };
 
@@ -684,7 +731,7 @@ static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
        {},
 };
 
-static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
+static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
 {
        switch (cap->hwcap_type) {
        case CAP_HWCAP:
@@ -729,12 +776,12 @@ static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *
        return rc;
 }
 
-static void setup_cpu_hwcaps(void)
+static void __init setup_cpu_hwcaps(void)
 {
        int i;
        const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
 
-       for (i = 0; hwcaps[i].desc; i++)
+       for (i = 0; hwcaps[i].matches; i++)
                if (hwcaps[i].matches(&hwcaps[i]))
                        cap_set_hwcap(&hwcaps[i]);
 }
@@ -744,11 +791,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 {
        int i;
 
-       for (i = 0; caps[i].desc; i++) {
+       for (i = 0; caps[i].matches; i++) {
                if (!caps[i].matches(&caps[i]))
                        continue;
 
-               if (!cpus_have_cap(caps[i].capability))
+               if (!cpus_have_cap(caps[i].capability) && caps[i].desc)
                        pr_info("%s %s\n", info, caps[i].desc);
                cpus_set_cap(caps[i].capability);
        }
@@ -758,11 +805,12 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  * Run through the enabled capabilities and enable() it on all active
  * CPUs
  */
-static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+static void __init
+enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
        int i;
 
-       for (i = 0; caps[i].desc; i++)
+       for (i = 0; caps[i].matches; i++)
                if (caps[i].enable && cpus_have_cap(caps[i].capability))
                        on_each_cpu(caps[i].enable, NULL, true);
 }
@@ -790,35 +838,36 @@ static inline void set_sys_caps_initialised(void)
 static u64 __raw_read_system_reg(u32 sys_id)
 {
        switch (sys_id) {
-       case SYS_ID_PFR0_EL1:           return (u64)read_cpuid(ID_PFR0_EL1);
-       case SYS_ID_PFR1_EL1:           return (u64)read_cpuid(ID_PFR1_EL1);
-       case SYS_ID_DFR0_EL1:           return (u64)read_cpuid(ID_DFR0_EL1);
-       case SYS_ID_MMFR0_EL1:          return (u64)read_cpuid(ID_MMFR0_EL1);
-       case SYS_ID_MMFR1_EL1:          return (u64)read_cpuid(ID_MMFR1_EL1);
-       case SYS_ID_MMFR2_EL1:          return (u64)read_cpuid(ID_MMFR2_EL1);
-       case SYS_ID_MMFR3_EL1:          return (u64)read_cpuid(ID_MMFR3_EL1);
-       case SYS_ID_ISAR0_EL1:          return (u64)read_cpuid(ID_ISAR0_EL1);
-       case SYS_ID_ISAR1_EL1:          return (u64)read_cpuid(ID_ISAR1_EL1);
-       case SYS_ID_ISAR2_EL1:          return (u64)read_cpuid(ID_ISAR2_EL1);
-       case SYS_ID_ISAR3_EL1:          return (u64)read_cpuid(ID_ISAR3_EL1);
-       case SYS_ID_ISAR4_EL1:          return (u64)read_cpuid(ID_ISAR4_EL1);
-       case SYS_ID_ISAR5_EL1:          return (u64)read_cpuid(ID_ISAR4_EL1);
-       case SYS_MVFR0_EL1:             return (u64)read_cpuid(MVFR0_EL1);
-       case SYS_MVFR1_EL1:             return (u64)read_cpuid(MVFR1_EL1);
-       case SYS_MVFR2_EL1:             return (u64)read_cpuid(MVFR2_EL1);
-
-       case SYS_ID_AA64PFR0_EL1:       return (u64)read_cpuid(ID_AA64PFR0_EL1);
-       case SYS_ID_AA64PFR1_EL1:       return (u64)read_cpuid(ID_AA64PFR0_EL1);
-       case SYS_ID_AA64DFR0_EL1:       return (u64)read_cpuid(ID_AA64DFR0_EL1);
-       case SYS_ID_AA64DFR1_EL1:       return (u64)read_cpuid(ID_AA64DFR0_EL1);
-       case SYS_ID_AA64MMFR0_EL1:      return (u64)read_cpuid(ID_AA64MMFR0_EL1);
-       case SYS_ID_AA64MMFR1_EL1:      return (u64)read_cpuid(ID_AA64MMFR1_EL1);
-       case SYS_ID_AA64ISAR0_EL1:      return (u64)read_cpuid(ID_AA64ISAR0_EL1);
-       case SYS_ID_AA64ISAR1_EL1:      return (u64)read_cpuid(ID_AA64ISAR1_EL1);
-
-       case SYS_CNTFRQ_EL0:            return (u64)read_cpuid(CNTFRQ_EL0);
-       case SYS_CTR_EL0:               return (u64)read_cpuid(CTR_EL0);
-       case SYS_DCZID_EL0:             return (u64)read_cpuid(DCZID_EL0);
+       case SYS_ID_PFR0_EL1:           return read_cpuid(SYS_ID_PFR0_EL1);
+       case SYS_ID_PFR1_EL1:           return read_cpuid(SYS_ID_PFR1_EL1);
+       case SYS_ID_DFR0_EL1:           return read_cpuid(SYS_ID_DFR0_EL1);
+       case SYS_ID_MMFR0_EL1:          return read_cpuid(SYS_ID_MMFR0_EL1);
+       case SYS_ID_MMFR1_EL1:          return read_cpuid(SYS_ID_MMFR1_EL1);
+       case SYS_ID_MMFR2_EL1:          return read_cpuid(SYS_ID_MMFR2_EL1);
+       case SYS_ID_MMFR3_EL1:          return read_cpuid(SYS_ID_MMFR3_EL1);
+       case SYS_ID_ISAR0_EL1:          return read_cpuid(SYS_ID_ISAR0_EL1);
+       case SYS_ID_ISAR1_EL1:          return read_cpuid(SYS_ID_ISAR1_EL1);
+       case SYS_ID_ISAR2_EL1:          return read_cpuid(SYS_ID_ISAR2_EL1);
+       case SYS_ID_ISAR3_EL1:          return read_cpuid(SYS_ID_ISAR3_EL1);
+       case SYS_ID_ISAR4_EL1:          return read_cpuid(SYS_ID_ISAR4_EL1);
+       case SYS_ID_ISAR5_EL1:          return read_cpuid(SYS_ID_ISAR5_EL1);
+       case SYS_MVFR0_EL1:             return read_cpuid(SYS_MVFR0_EL1);
+       case SYS_MVFR1_EL1:             return read_cpuid(SYS_MVFR1_EL1);
+       case SYS_MVFR2_EL1:             return read_cpuid(SYS_MVFR2_EL1);
+
+       case SYS_ID_AA64PFR0_EL1:       return read_cpuid(SYS_ID_AA64PFR0_EL1);
+       case SYS_ID_AA64PFR1_EL1:       return read_cpuid(SYS_ID_AA64PFR1_EL1);
+       case SYS_ID_AA64DFR0_EL1:       return read_cpuid(SYS_ID_AA64DFR0_EL1);
+       case SYS_ID_AA64DFR1_EL1:       return read_cpuid(SYS_ID_AA64DFR1_EL1);
+       case SYS_ID_AA64MMFR0_EL1:      return read_cpuid(SYS_ID_AA64MMFR0_EL1);
+       case SYS_ID_AA64MMFR1_EL1:      return read_cpuid(SYS_ID_AA64MMFR1_EL1);
+       case SYS_ID_AA64MMFR2_EL1:      return read_cpuid(SYS_ID_AA64MMFR2_EL1);
+       case SYS_ID_AA64ISAR0_EL1:      return read_cpuid(SYS_ID_AA64ISAR0_EL1);
+       case SYS_ID_AA64ISAR1_EL1:      return read_cpuid(SYS_ID_AA64ISAR1_EL1);
+
+       case SYS_CNTFRQ_EL0:            return read_cpuid(SYS_CNTFRQ_EL0);
+       case SYS_CTR_EL0:               return read_cpuid(SYS_CTR_EL0);
+       case SYS_DCZID_EL0:             return read_cpuid(SYS_DCZID_EL0);
        default:
                BUG();
                return 0;
@@ -868,7 +917,7 @@ void verify_local_cpu_capabilities(void)
                return;
 
        caps = arm64_features;
-       for (i = 0; caps[i].desc; i++) {
+       for (i = 0; caps[i].matches; i++) {
                if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
                        continue;
                /*
@@ -881,7 +930,7 @@ void verify_local_cpu_capabilities(void)
                        caps[i].enable(NULL);
        }
 
-       for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
+       for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
                if (!cpus_have_hwcap(&caps[i]))
                        continue;
                if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
@@ -897,7 +946,7 @@ static inline void set_sys_caps_initialised(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void setup_feature_capabilities(void)
+static void __init setup_feature_capabilities(void)
 {
        update_cpu_capabilities(arm64_features, "detected feature:");
        enable_cpu_capabilities(arm64_features);
@@ -927,3 +976,9 @@ void __init setup_cpu_features(void)
                pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
                        L1_CACHE_BYTES, cls);
 }
+
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
+{
+       return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+}
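+
+This meta-capability gates the PAN-toggling alternatives in uaccess.h so
+they fire only on CPUs that have PAN but lack UAO; where UAO exists, the
+ldtr/sttr alternatives take over and PAN can stay set across uaccess. In
+table form:
+
+	PAN	UAO	ARM64_ALT_PAN_NOT_UAO
+	yes	no	set   (toggle PAN around uaccess)
+	yes	yes	clear (UAO/ldtr path used instead)
+	no	-	clear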
index a5f2340396167d19ecc0931447c272af59fe6858..6f08c7445ecf39415a5023a71b49c01383f2141b 100644 (file)
@@ -202,35 +202,36 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 {
        info->reg_cntfrq = arch_timer_get_cntfrq();
        info->reg_ctr = read_cpuid_cachetype();
-       info->reg_dczid = read_cpuid(DCZID_EL0);
+       info->reg_dczid = read_cpuid(SYS_DCZID_EL0);
        info->reg_midr = read_cpuid_id();
 
-       info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
-       info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
-       info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
-       info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
-       info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
-       info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
-       info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-       info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
-
-       info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
-       info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
-       info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
-       info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
-       info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
-       info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
-       info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
-       info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
-       info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
-       info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
-       info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
-       info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
-       info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
-
-       info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
-       info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
-       info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+       info->reg_id_aa64dfr0 = read_cpuid(SYS_ID_AA64DFR0_EL1);
+       info->reg_id_aa64dfr1 = read_cpuid(SYS_ID_AA64DFR1_EL1);
+       info->reg_id_aa64isar0 = read_cpuid(SYS_ID_AA64ISAR0_EL1);
+       info->reg_id_aa64isar1 = read_cpuid(SYS_ID_AA64ISAR1_EL1);
+       info->reg_id_aa64mmfr0 = read_cpuid(SYS_ID_AA64MMFR0_EL1);
+       info->reg_id_aa64mmfr1 = read_cpuid(SYS_ID_AA64MMFR1_EL1);
+       info->reg_id_aa64mmfr2 = read_cpuid(SYS_ID_AA64MMFR2_EL1);
+       info->reg_id_aa64pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1);
+       info->reg_id_aa64pfr1 = read_cpuid(SYS_ID_AA64PFR1_EL1);
+
+       info->reg_id_dfr0 = read_cpuid(SYS_ID_DFR0_EL1);
+       info->reg_id_isar0 = read_cpuid(SYS_ID_ISAR0_EL1);
+       info->reg_id_isar1 = read_cpuid(SYS_ID_ISAR1_EL1);
+       info->reg_id_isar2 = read_cpuid(SYS_ID_ISAR2_EL1);
+       info->reg_id_isar3 = read_cpuid(SYS_ID_ISAR3_EL1);
+       info->reg_id_isar4 = read_cpuid(SYS_ID_ISAR4_EL1);
+       info->reg_id_isar5 = read_cpuid(SYS_ID_ISAR5_EL1);
+       info->reg_id_mmfr0 = read_cpuid(SYS_ID_MMFR0_EL1);
+       info->reg_id_mmfr1 = read_cpuid(SYS_ID_MMFR1_EL1);
+       info->reg_id_mmfr2 = read_cpuid(SYS_ID_MMFR2_EL1);
+       info->reg_id_mmfr3 = read_cpuid(SYS_ID_MMFR3_EL1);
+       info->reg_id_pfr0 = read_cpuid(SYS_ID_PFR0_EL1);
+       info->reg_id_pfr1 = read_cpuid(SYS_ID_PFR1_EL1);
+
+       info->reg_mvfr0 = read_cpuid(SYS_MVFR0_EL1);
+       info->reg_mvfr1 = read_cpuid(SYS_MVFR1_EL1);
+       info->reg_mvfr2 = read_cpuid(SYS_MVFR2_EL1);
 
        cpuinfo_detect_icache_policy(info);
 
index a773db92908b03d325c26dba0dc5ea9c287ef49d..f82036e02485a0abe27349e3ec76a00a491f7450 100644 (file)
@@ -61,7 +61,7 @@ ENTRY(entry)
         */
        mov     x20, x0         // DTB address
        ldr     x0, [sp, #16]   // relocated _text address
-       ldr     x21, =stext_offset
+       movz    x21, #:abs_g0:stext_offset
        add     x21, x0, x21
 
        /*
index 7ed3d75f630418b56a1add8c91b308b48cd69774..1f7f5a2b61bf0de999d80e6ced16bec120f716b6 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/cpufeature.h>
 #include <asm/errno.h>
 #include <asm/esr.h>
+#include <asm/irq.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
 
        .if     \el == 0
        mrs     x21, sp_el0
-       get_thread_info tsk                     // Ensure MDSCR_EL1.SS is clear,
+       mov     tsk, sp
+       and     tsk, tsk, #~(THREAD_SIZE - 1)   // Ensure MDSCR_EL1.SS is clear,
        ldr     x19, [tsk, #TI_FLAGS]           // since we can unmask debug
        disable_step_tsk x19, x20               // exceptions when scheduling.
+
+       mov     x29, xzr                        // fp pointed to user-space
        .else
        add     x21, sp, #S_FRAME_SIZE
        .endif
        str     x21, [sp, #S_SYSCALLNO]
        .endif
 
+       /*
+        * Set sp_el0 to current thread_info.
+        */
+       .if     \el == 0
+       msr     sp_el0, tsk
+       .endif
+
        /*
         * Registers that may be useful after this macro is invoked:
         *
@@ -164,8 +175,44 @@ alternative_endif
        .endm
 
        .macro  get_thread_info, rd
-       mov     \rd, sp
-       and     \rd, \rd, #~(THREAD_SIZE - 1)   // top of stack
+       mrs     \rd, sp_el0
+       .endm
+
+       .macro  irq_stack_entry
+       mov     x19, sp                 // preserve the original sp
+
+       /*
+        * Compare sp with the current thread_info; if the top
+        * ~(THREAD_SIZE - 1) bits match, we are on a task stack and
+        * should switch to the irq stack.
+        */
+       and     x25, x19, #~(THREAD_SIZE - 1)
+       cmp     x25, tsk
+       b.ne    9998f
+
+       this_cpu_ptr irq_stack, x25, x26
+       mov     x26, #IRQ_STACK_START_SP
+       add     x26, x25, x26
+
+       /* switch to the irq stack */
+       mov     sp, x26
+
+       /*
+        * Add a dummy stack frame; this non-standard format is fixed up
+        * by unwind_frame().
+        */
+       stp     x29, x19, [sp, #-16]!
+       mov     x29, sp
+
+9998:
+       .endm
+
+       /*
+        * x19 should be preserved between irq_stack_entry and
+        * irq_stack_exit.
+        */
+       .macro  irq_stack_exit
+       mov     sp, x19
        .endm
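+
+	/*
+	 * The dummy frame pushed by irq_stack_entry stores the previous
+	 * frame pointer together with the original task sp held in x19,
+	 * letting unwind_frame() recognise the non-standard record and
+	 * step from the IRQ stack back onto the task stack;
+	 * irq_stack_exit then simply restores that saved sp.
+	 */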
 
 /*
@@ -183,10 +230,11 @@ tsk       .req    x28             // current thread_info
  * Interrupt handling.
  */
        .macro  irq_handler
-       adrp    x1, handle_arch_irq
-       ldr     x1, [x1, #:lo12:handle_arch_irq]
+       ldr_l   x1, handle_arch_irq
        mov     x0, sp
+       irq_stack_entry
        blr     x1
+       irq_stack_exit
        .endm
 
        .text
@@ -358,10 +406,10 @@ el1_irq:
        bl      trace_hardirqs_off
 #endif
 
+       get_thread_info tsk
        irq_handler
 
 #ifdef CONFIG_PREEMPT
-       get_thread_info tsk
        ldr     w24, [tsk, #TI_PREEMPT]         // get preempt count
        cbnz    w24, 1f                         // preempt count != 0
        ldr     x0, [tsk, #TI_FLAGS]            // get flags
@@ -599,6 +647,8 @@ ENTRY(cpu_switch_to)
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
+       and     x9, x9, #~(THREAD_SIZE - 1)
+       msr     sp_el0, x9
        ret
 ENDPROC(cpu_switch_to)
 
@@ -626,14 +676,14 @@ ret_fast_syscall_trace:
 work_pending:
        tbnz    x1, #TIF_NEED_RESCHED, work_resched
        /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
-       ldr     x2, [sp, #S_PSTATE]
        mov     x0, sp                          // 'regs'
-       tst     x2, #PSR_MODE_MASK              // user mode regs?
-       b.ne    no_work_pending                 // returning to kernel
        enable_irq                              // enable interrupts for do_notify_resume()
        bl      do_notify_resume
        b       ret_to_user
 work_resched:
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off              // the IRQs are off here, inform the tracing code
+#endif
        bl      schedule
 
 /*
@@ -645,7 +695,6 @@ ret_to_user:
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, work_pending
        enable_step_tsk x1, x2
-no_work_pending:
        kernel_exit 0
 ENDPROC(ret_to_user)
 
index 4c46c54a3ad7ad817b8ba410565b8eff47cd3c08..acc1afd5c749a62b7c0bae5a14d2fd7dbc33adf2 100644 (file)
@@ -289,7 +289,7 @@ static struct notifier_block fpsimd_cpu_pm_notifier_block = {
        .notifier_call = fpsimd_cpu_pm_notifier,
 };
 
-static void fpsimd_pm_init(void)
+static void __init fpsimd_pm_init(void)
 {
        cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
 }
index c851be795080336938f4826cc0608234b0e34bfa..ebecf9aa33d12da8a564ea0314f59a71b89e64a0 100644 (file)
@@ -29,12 +29,11 @@ static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
 
        /*
         * Note:
-        * Due to modules and __init, code can disappear and change,
-        * we need to protect against faulting as well as code changing.
-        * We do this by aarch64_insn_*() which use the probe_kernel_*().
-        *
-        * No lock is held here because all the modifications are run
-        * through stop_machine().
+        * We are paranoid about modifying text: if a bug were to happen, it
+        * could cause us to read or write somewhere that could cause harm.
+        * Carefully read and modify the code with the aarch64_insn_*()
+        * helpers, which use probe_kernel_*(), and make sure what we read is
+        * what we expected it to be before modifying it.
         */
        if (validate) {
                if (aarch64_insn_read((void *)pc, &replaced))
@@ -93,6 +92,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
        return ftrace_modify_code(pc, old, new, true);
 }
 
+void arch_ftrace_update_code(int command)
+{
+       ftrace_modify_all_code(command);
+}
+
 int __init ftrace_dyn_arch_init(void)
 {
        return 0;
@@ -125,23 +129,20 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
         * on other archs. It's unlikely on AArch64.
         */
        old = *parent;
-       *parent = return_hooker;
 
        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;
 
        /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace)) {
-               *parent = old;
+       if (!ftrace_graph_entry(&trace))
                return;
-       }
 
        err = ftrace_push_return_trace(old, self_addr, &trace.depth,
                                       frame_pointer);
-       if (err == -EBUSY) {
-               *parent = old;
+       if (err == -EBUSY)
                return;
-       }
+       else
+               *parent = return_hooker;
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
index b685257926f0b0472550036f6d3ba34d17af7f62..a88a15447c3bb382970a940b39fb9d6e0e6b9529 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 #include <asm/cputype.h>
+#include <asm/elf.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
  * in the entry routines.
  */
        __HEAD
-
+_head:
        /*
         * DO NOT MODIFY. Image header expected by Linux boot-loaders.
         */
 #ifdef CONFIG_EFI
-efi_head:
        /*
         * This add instruction has no meaningful effect except that
         * its opcode forms the magic "MZ" signature required by UEFI.
@@ -83,9 +83,9 @@ efi_head:
        b       stext                           // branch to kernel start, magic
        .long   0                               // reserved
 #endif
-       .quad   _kernel_offset_le               // Image load offset from start of RAM, little-endian
-       .quad   _kernel_size_le                 // Effective size of kernel image, little-endian
-       .quad   _kernel_flags_le                // Informative flags, little-endian
+       le64sym _kernel_offset_le               // Image load offset from start of RAM, little-endian
+       le64sym _kernel_size_le                 // Effective size of kernel image, little-endian
+       le64sym _kernel_flags_le                // Informative flags, little-endian
        .quad   0                               // reserved
        .quad   0                               // reserved
        .quad   0                               // reserved
@@ -94,14 +94,14 @@ efi_head:
        .byte   0x4d
        .byte   0x64
 #ifdef CONFIG_EFI
-       .long   pe_header - efi_head            // Offset to the PE header.
+       .long   pe_header - _head               // Offset to the PE header.
 #else
        .word   0                               // reserved
 #endif
 
 #ifdef CONFIG_EFI
        .globl  __efistub_stext_offset
-       .set    __efistub_stext_offset, stext - efi_head
+       .set    __efistub_stext_offset, stext - _head
        .align 3
 pe_header:
        .ascii  "PE"
@@ -124,7 +124,7 @@ optional_header:
        .long   _end - stext                    // SizeOfCode
        .long   0                               // SizeOfInitializedData
        .long   0                               // SizeOfUninitializedData
-       .long   __efistub_entry - efi_head      // AddressOfEntryPoint
+       .long   __efistub_entry - _head         // AddressOfEntryPoint
        .long   __efistub_stext_offset          // BaseOfCode
 
 extra_header_fields:
@@ -139,7 +139,7 @@ extra_header_fields:
        .short  0                               // MinorSubsystemVersion
        .long   0                               // Win32VersionValue
 
-       .long   _end - efi_head                 // SizeOfImage
+       .long   _end - _head                    // SizeOfImage
 
        // Everything before the kernel image is considered part of the header
        .long   __efistub_stext_offset          // SizeOfHeaders
@@ -210,6 +210,7 @@ section_table:
 ENTRY(stext)
        bl      preserve_boot_args
        bl      el2_setup                       // Drop to EL1, w20=cpu_boot_mode
+       mov     x23, xzr                        // KASLR offset, defaults to 0
        adrp    x24, __PHYS_OFFSET
        bl      set_cpu_boot_mode_flag
        bl      __create_page_tables            // x25=TTBR0, x26=TTBR1
@@ -219,11 +220,13 @@ ENTRY(stext)
         * On return, the CPU will be ready for the MMU to be turned on and
         * the TCR will have been set.
         */
-       ldr     x27, =__mmap_switched           // address to jump to after
+       ldr     x27, 0f                         // address to jump to after
                                                // MMU has been enabled
        adr_l   lr, __enable_mmu                // return (PIC) address
        b       __cpu_setup                     // initialise processor
 ENDPROC(stext)
+       .align  3
+0:     .quad   __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
 
 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
@@ -311,7 +314,7 @@ ENDPROC(preserve_boot_args)
 __create_page_tables:
        adrp    x25, idmap_pg_dir
        adrp    x26, swapper_pg_dir
-       mov     x27, lr
+       mov     x28, lr
 
        /*
         * Invalidate the idmap and swapper page tables to avoid potential
@@ -389,9 +392,11 @@ __create_page_tables:
         * Map the kernel image (starting with PHYS_OFFSET).
         */
        mov     x0, x26                         // swapper_pg_dir
-       mov     x5, #PAGE_OFFSET
+       ldr     x5, =KIMAGE_VADDR
+       add     x5, x5, x23                     // add KASLR displacement
        create_pgd_entry x0, x5, x3, x6
-       ldr     x6, =KERNEL_END                 // __va(KERNEL_END)
+       ldr     w6, kernel_img_size
+       add     x6, x6, x5
        mov     x3, x24                         // phys offset
        create_block_map x0, x7, x3, x5, x6
 
@@ -405,9 +410,11 @@ __create_page_tables:
        dmb     sy
        bl      __inval_cache_range
 
-       mov     lr, x27
-       ret
+       ret     x28
 ENDPROC(__create_page_tables)
+
+kernel_img_size:
+       .long   _end - (_head - TEXT_OFFSET)
        .ltorg
 
 /*
@@ -415,20 +422,80 @@ ENDPROC(__create_page_tables)
  */
        .set    initial_sp, init_thread_union + THREAD_START_SP
 __mmap_switched:
-       adr_l   x6, __bss_start
-       adr_l   x7, __bss_stop
+       mov     x28, lr                         // preserve LR
+       adr_l   x8, vectors                     // load VBAR_EL1 with virtual
+       msr     vbar_el1, x8                    // vector table address
+       isb
 
-1:     cmp     x6, x7
+       // Clear BSS
+       adr_l   x0, __bss_start
+       mov     x1, xzr
+       adr_l   x2, __bss_stop
+       sub     x2, x2, x0
+       bl      __pi_memset
+       dsb     ishst                           // Make zero page visible to PTW
+
+#ifdef CONFIG_RELOCATABLE
+
+       /*
+        * Iterate over each entry in the relocation table, and apply the
+        * relocations in place.
+        */
+       adr_l   x8, __dynsym_start              // start of symbol table
+       adr_l   x9, __reloc_start               // start of reloc table
+       adr_l   x10, __reloc_end                // end of reloc table
+
+0:     cmp     x9, x10
        b.hs    2f
-       str     xzr, [x6], #8                   // Clear BSS
-       b       1b
-2:
+       ldp     x11, x12, [x9], #24
+       ldr     x13, [x9, #-8]
+       cmp     w12, #R_AARCH64_RELATIVE
+       b.ne    1f
+       add     x13, x13, x23                   // relocate
+       str     x13, [x11, x23]
+       b       0b
+
+1:     cmp     w12, #R_AARCH64_ABS64
+       b.ne    0b
+       add     x12, x12, x12, lsl #1           // symtab offset: 24x top word
+       add     x12, x8, x12, lsr #(32 - 3)     // ... shifted into bottom word
+       ldrsh   w14, [x12, #6]                  // Elf64_Sym::st_shndx
+       ldr     x15, [x12, #8]                  // Elf64_Sym::st_value
+       cmp     w14, #-0xf                      // SHN_ABS (0xfff1) ?
+       add     x14, x15, x23                   // relocate
+       csel    x15, x14, x15, ne
+       add     x15, x13, x15
+       str     x15, [x11, x23]
+       b       0b
+
+2:     adr_l   x8, kimage_vaddr                // make relocated kimage_vaddr
+       dc      cvac, x8                        // value visible to secondaries
+       dsb     sy                              // with MMU off
+#endif
+
        adr_l   sp, initial_sp, x4
+       mov     x4, sp
+       and     x4, x4, #~(THREAD_SIZE - 1)
+       msr     sp_el0, x4                      // Save thread_info
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
-       str_l   x24, memstart_addr, x6          // Save PHYS_OFFSET
+
+       ldr_l   x4, kimage_vaddr                // Save the offset between
+       sub     x4, x4, x24                     // the kernel virtual and
+       str_l   x4, kimage_voffset, x5          // physical mappings
+
        mov     x29, #0
 #ifdef CONFIG_KASAN
        bl      kasan_early_init
+#endif
+#ifdef CONFIG_RANDOMIZE_BASE
+       cbnz    x23, 0f                         // already running randomized?
+       mov     x0, x21                         // pass FDT address in x0
+       bl      kaslr_early_init                // parse FDT for KASLR options
+       cbz     x0, 0f                          // KASLR disabled? just proceed
+       mov     x23, x0                         // record KASLR offset
+       ret     x28                             // we must enable KASLR, return
+                                               // to __enable_mmu()
+0:
 #endif
        b       start_kernel
 ENDPROC(__mmap_switched)
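 
 The R_AARCH64_RELATIVE case above is the standard PIE fixup; per entry,
 the loop does the equivalent of the following C (field layout per the
 ELF64 Rela definition; x11 = r_offset, w12 = type, x13 = r_addend,
 x23 = the KASLR displacement):
 
	typedef struct {
		u64 r_offset;	/* link-time address of the place to patch */
		u64 r_info;	/* low 32 bits: relocation type */
		s64 r_addend;
	} Elf64_Rela;

	static void apply_relative(const Elf64_Rela *r, u64 kaslr_offset)
	{
		/* the patched location and its value shift by the same amount */
		*(u64 *)(r->r_offset + kaslr_offset) = r->r_addend + kaslr_offset;
	}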
@@ -438,6 +505,10 @@ ENDPROC(__mmap_switched)
  * hotplug and needs to have the same protections as the text region
  */
        .section ".text","ax"
+
+ENTRY(kimage_vaddr)
+       .quad           _text - TEXT_OFFSET
+
 /*
  * If we're fortunate enough to boot at EL2, ensure that the world is
  * sane before dropping to EL1.
@@ -603,14 +674,22 @@ ENTRY(secondary_startup)
        adrp    x26, swapper_pg_dir
        bl      __cpu_setup                     // initialise processor
 
-       ldr     x21, =secondary_data
-       ldr     x27, =__secondary_switched      // address to jump to after enabling the MMU
+       ldr     x8, kimage_vaddr
+       ldr     w9, 0f
+       sub     x27, x8, w9, sxtw               // address to jump to after enabling the MMU
        b       __enable_mmu
 ENDPROC(secondary_startup)
+0:     .long   (_text - TEXT_OFFSET) - __secondary_switched
 
 ENTRY(__secondary_switched)
-       ldr     x0, [x21]                       // get secondary_data.stack
+       adr_l   x5, vectors
+       msr     vbar_el1, x5
+       isb
+
+       ldr_l   x0, secondary_data              // get secondary_data.stack
        mov     sp, x0
+       and     x0, x0, #~(THREAD_SIZE - 1)
+       msr     sp_el0, x0                      // save thread_info
        mov     x29, #0
        b       secondary_start_kernel
 ENDPROC(__secondary_switched)
@@ -628,12 +707,11 @@ ENDPROC(__secondary_switched)
  */
        .section        ".idmap.text", "ax"
 __enable_mmu:
+       mrs     x18, sctlr_el1                  // preserve old SCTLR_EL1 value
        mrs     x1, ID_AA64MMFR0_EL1
        ubfx    x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
        cmp     x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
        b.ne    __no_granule_support
-       ldr     x5, =vectors
-       msr     vbar_el1, x5
        msr     ttbr0_el1, x25                  // load TTBR0
        msr     ttbr1_el1, x26                  // load TTBR1
        isb
@@ -647,6 +725,26 @@ __enable_mmu:
        ic      iallu
        dsb     nsh
        isb
+#ifdef CONFIG_RANDOMIZE_BASE
+       mov     x19, x0                         // preserve new SCTLR_EL1 value
+       blr     x27
+
+       /*
+        * If we return here, we have a KASLR displacement in x23 which we need
+        * to take into account by discarding the current kernel mapping and
+        * creating a new one.
+        */
+       msr     sctlr_el1, x18                  // disable the MMU
+       isb
+       bl      __create_page_tables            // recreate kernel mapping
+
+       msr     sctlr_el1, x19                  // re-enable the MMU
+       isb
+       ic      iallu                           // flush instructions fetched
+       dsb     nsh                             // via old mapping
+       isb
+       add     x27, x27, x23                   // relocated __mmap_switched
+#endif
        br      x27
 ENDPROC(__enable_mmu)
 
index bc2abb8b1599576ae2dec02bce0c46c48fc707dd..db1bf57948f175a0fd3a9847897889ed5169c1e0 100644 (file)
  * There aren't any ELF relocations we can use to endian-swap values known only
  * at link time (e.g. the subtraction of two symbol addresses), so we must get
  * the linker to endian-swap certain values before emitting them.
+ *
+ * Note that, in order for this to work when building the ELF64 PIE executable
+ * (for KASLR), these values should not be referenced via R_AARCH64_ABS64
+ * relocations, since these are fixed up at runtime rather than at build time
+ * when PIE is in effect. So we need to split them up in 32-bit high and low
+ * words.
  */
 #ifdef CONFIG_CPU_BIG_ENDIAN
-#define DATA_LE64(data)                                        \
-       ((((data) & 0x00000000000000ff) << 56) |        \
-        (((data) & 0x000000000000ff00) << 40) |        \
-        (((data) & 0x0000000000ff0000) << 24) |        \
-        (((data) & 0x00000000ff000000) << 8)  |        \
-        (((data) & 0x000000ff00000000) >> 8)  |        \
-        (((data) & 0x0000ff0000000000) >> 24) |        \
-        (((data) & 0x00ff000000000000) >> 40) |        \
-        (((data) & 0xff00000000000000) >> 56))
+#define DATA_LE32(data)                                \
+       ((((data) & 0x000000ff) << 24) |        \
+        (((data) & 0x0000ff00) << 8)  |        \
+        (((data) & 0x00ff0000) >> 8)  |        \
+        (((data) & 0xff000000) >> 24))
 #else
-#define DATA_LE64(data) ((data) & 0xffffffffffffffff)
+#define DATA_LE32(data) ((data) & 0xffffffff)
 #endif
 
+#define DEFINE_IMAGE_LE64(sym, data)                           \
+       sym##_lo32 = DATA_LE32((data) & 0xffffffff);            \
+       sym##_hi32 = DATA_LE32((data) >> 32)
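
For illustration only, a minimal user-space sketch of the DATA_LE32()/DEFINE_IMAGE_LE64() arithmetic above, as it behaves on a CONFIG_CPU_BIG_ENDIAN build (the flag value below is a made-up example, not a real build constant):

#include <stdint.h>
#include <stdio.h>

/* Same bit-twiddling as DATA_LE32() on a big-endian build. */
static uint32_t data_le32(uint32_t data)
{
        return ((data & 0x000000ff) << 24) |
               ((data & 0x0000ff00) << 8)  |
               ((data & 0x00ff0000) >> 8)  |
               ((data & 0xff000000) >> 24);
}

int main(void)
{
        uint64_t flags = 0xb;   /* e.g. BE | 4K page size | PHYS_BASE */
        uint32_t lo32 = data_le32(flags & 0xffffffff);
        uint32_t hi32 = data_le32(flags >> 32);

        /* Emitting lo32 then hi32 yields a little-endian 64-bit header
         * field without needing an R_AARCH64_ABS64 relocation. */
        printf("lo32=%08x hi32=%08x\n", lo32, hi32);
        return 0;
}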
+
 #ifdef CONFIG_CPU_BIG_ENDIAN
-#define __HEAD_FLAG_BE 1
+#define __HEAD_FLAG_BE         1
 #else
-#define __HEAD_FLAG_BE 0
+#define __HEAD_FLAG_BE         0
 #endif
 
-#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
+#define __HEAD_FLAG_PAGE_SIZE  ((PAGE_SHIFT - 10) / 2)
+
+#define __HEAD_FLAG_PHYS_BASE  1
 
-#define __HEAD_FLAGS   ((__HEAD_FLAG_BE << 0) |        \
-                        (__HEAD_FLAG_PAGE_SIZE << 1))
+#define __HEAD_FLAGS           ((__HEAD_FLAG_BE << 0) |        \
+                                (__HEAD_FLAG_PAGE_SIZE << 1) | \
+                                (__HEAD_FLAG_PHYS_BASE << 3))
 
 /*
 * These will output as part of the Image header, which should be little-endian
 * regardless of the endianness of the kernel. While constant values could be
 * endian swapped in head.S, all are done here for consistency.
  */
 #define HEAD_SYMBOLS                                           \
-       _kernel_size_le         = DATA_LE64(_end - _text);      \
-       _kernel_offset_le       = DATA_LE64(TEXT_OFFSET);       \
-       _kernel_flags_le        = DATA_LE64(__HEAD_FLAGS);
+       DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text);       \
+       DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET);      \
+       DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
 
 #ifdef CONFIG_EFI
 
+/*
+ * Prevent the symbol aliases below from being emitted into the kallsyms
+ * table, by forcing them to be absolute symbols (which are conveniently
+ * ignored by scripts/kallsyms) rather than section relative symbols.
+ * The distinction is only relevant for partial linking, and only for symbols
+ * that are defined within a section declaration (which is not the case for
+ * the definitions below), so the resulting values will be identical.
+ */
+#define KALLSYMS_HIDE(sym)     ABSOLUTE(sym)
+
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
 * isolate it from the kernel proper. The following symbols are legally
 * accessed by the stub, so provide some aliases to make them accessible.
 * Only include data symbols here, or text symbols of functions that are
 * guaranteed to be safe when executed at another offset than they were
 * linked at. The routines below are all implemented in assembler in a
 * position independent manner
  */
-__efistub_memcmp               = __pi_memcmp;
-__efistub_memchr               = __pi_memchr;
-__efistub_memcpy               = __pi_memcpy;
-__efistub_memmove              = __pi_memmove;
-__efistub_memset               = __pi_memset;
-__efistub_strlen               = __pi_strlen;
-__efistub_strcmp               = __pi_strcmp;
-__efistub_strncmp              = __pi_strncmp;
-__efistub___flush_dcache_area  = __pi___flush_dcache_area;
+__efistub_memcmp               = KALLSYMS_HIDE(__pi_memcmp);
+__efistub_memchr               = KALLSYMS_HIDE(__pi_memchr);
+__efistub_memcpy               = KALLSYMS_HIDE(__pi_memcpy);
+__efistub_memmove              = KALLSYMS_HIDE(__pi_memmove);
+__efistub_memset               = KALLSYMS_HIDE(__pi_memset);
+__efistub_strlen               = KALLSYMS_HIDE(__pi_strlen);
+__efistub_strcmp               = KALLSYMS_HIDE(__pi_strcmp);
+__efistub_strncmp              = KALLSYMS_HIDE(__pi_strncmp);
+__efistub___flush_dcache_area  = KALLSYMS_HIDE(__pi___flush_dcache_area);
 
 #ifdef CONFIG_KASAN
-__efistub___memcpy             = __pi_memcpy;
-__efistub___memmove            = __pi_memmove;
-__efistub___memset             = __pi_memset;
+__efistub___memcpy             = KALLSYMS_HIDE(__pi_memcpy);
+__efistub___memmove            = KALLSYMS_HIDE(__pi_memmove);
+__efistub___memset             = KALLSYMS_HIDE(__pi_memset);
 #endif
 
-__efistub__text                        = _text;
-__efistub__end                 = _end;
-__efistub__edata               = _edata;
+__efistub__text                        = KALLSYMS_HIDE(_text);
+__efistub__end                 = KALLSYMS_HIDE(_end);
+__efistub__edata               = KALLSYMS_HIDE(_edata);
 
 #endif
 
index 9f17ec071ee0e8a8b1380133cf319a9ad1c80de5..2386b26c071274d4d563a4fdc5e864aedda37204 100644 (file)
@@ -30,6 +30,9 @@
 
 unsigned long irq_err_count;
 
+/* The irq stack only needs to be 16-byte aligned, not IRQ_STACK_SIZE aligned. */
+DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
+
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
        show_ipi_list(p, prec);
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
new file mode 100644 (file)
index 0000000..5829839
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/libfdt.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+
+u64 __read_mostly module_alloc_base;
+u16 __initdata memstart_offset_seed;
+
+static __init u64 get_kaslr_seed(void *fdt)
+{
+       int node, len;
+       u64 *prop;
+       u64 ret;
+
+       node = fdt_path_offset(fdt, "/chosen");
+       if (node < 0)
+               return 0;
+
+       prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
+       if (!prop || len != sizeof(u64))
+               return 0;
+
+       ret = fdt64_to_cpu(*prop);
+       *prop = 0;
+       return ret;
+}
+
+static __init const u8 *get_cmdline(void *fdt)
+{
+       static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
+
+       if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+               int node;
+               const u8 *prop;
+
+               node = fdt_path_offset(fdt, "/chosen");
+               if (node < 0)
+                       goto out;
+
+               prop = fdt_getprop(fdt, node, "bootargs", NULL);
+               if (!prop)
+                       goto out;
+               return prop;
+       }
+out:
+       return default_cmdline;
+}
+
+extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
+                                      pgprot_t prot);
+
+/*
+ * This routine will be executed with the kernel mapped at its default virtual
+ * address, and if it returns successfully, the kernel will be remapped, and
+ * start_kernel() will be executed from a randomized virtual offset. The
+ * relocation will cause all absolute references (e.g., static variables
+ * containing function pointers) to be reinitialized, and zero-initialized
+ * .bss variables will be reset to 0.
+ */
+u64 __init kaslr_early_init(u64 dt_phys)
+{
+       void *fdt;
+       u64 seed, offset, mask, module_range;
+       const u8 *cmdline, *str;
+       int size;
+
+       /*
+        * Set a reasonable default for module_alloc_base in case
+        * we end up running with module randomization disabled.
+        */
+       module_alloc_base = (u64)_etext - MODULES_VSIZE;
+
+       /*
+        * Try to map the FDT early. If this fails, we simply bail
+        * and proceed with KASLR disabled. We will make another
+        * attempt at mapping the FDT in setup_machine().
+        */
+       early_fixmap_init();
+       fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+       if (!fdt)
+               return 0;
+
+       /*
+        * Retrieve (and wipe) the seed from the FDT
+        */
+       seed = get_kaslr_seed(fdt);
+       if (!seed)
+               return 0;
+
+       /*
+        * Check if 'nokaslr' appears on the command line, and
+        * return 0 if that is the case.
+        */
+       cmdline = get_cmdline(fdt);
+       str = strstr(cmdline, "nokaslr");
+       if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+               return 0;
+
+       /*
+        * OK, so we are proceeding with KASLR enabled. Calculate a suitable
+        * kernel image offset from the seed. Let's place the kernel in the
+        * lower half of the VMALLOC area (a (VA_BITS - 2)-bit window).
+        * Even if we could randomize at page granularity for 16k and 64k pages,
+        * let's always round to 2 MB so we don't interfere with the ability to
+        * map using contiguous PTEs.
+        */
+       mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
+       offset = seed & mask;
+
+       /* use the top 16 bits to randomize the linear region */
+       memstart_offset_seed = seed >> 48;
+
+       /*
+        * The kernel Image should not extend across a 1GB/32MB/512MB alignment
+        * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
+        * happens, increase the KASLR offset by the size of the kernel image.
+        */
+       if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+           (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+               offset = (offset + (u64)(_end - _text)) & mask;
+
+       if (IS_ENABLED(CONFIG_KASAN))
+               /*
+                * KASAN does not expect the module region to intersect the
+                * vmalloc region, since shadow memory is allocated for each
+                * module at load time, whereas the vmalloc region is shadowed
+                * by KASAN zero pages. So keep modules out of the vmalloc
+                * region if KASAN is enabled.
+                */
+               return offset;
+
+       if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+               /*
+                * Randomize the module region independently from the core
+                * kernel. This prevents modules from leaking any information
+                * about the address of the kernel itself, but results in
+                * branches between modules and the core kernel that are
+                * resolved via PLTs. (Branches between modules will be
+                * resolved normally.)
+                */
+               module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
+               module_alloc_base = VMALLOC_START;
+       } else {
+               /*
+                * Randomize the module region by setting module_alloc_base to
+                * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
+                * _stext). This guarantees that the resulting region still
+                * covers [_stext, _etext], and that all relative branches can
+                * be resolved without veneers.
+                */
+               module_range = MODULES_VSIZE - (u64)(_etext - _stext);
+               module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
+       }
+
+       /* use the lower 21 bits to randomize the base of the module region */
+       module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
+       module_alloc_base &= PAGE_MASK;
+
+       return offset;
+}
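
As a rough standalone illustration of the offset arithmetic in kaslr_early_init() above (VA_BITS and the seed below are assumed values for the sketch, not taken from a real boot):

#include <stdint.h>
#include <stdio.h>

#define VA_BITS 48
#define SZ_2M   0x200000UL

int main(void)
{
        uint64_t seed = 0x123456789abcdef0ULL;  /* hypothetical kaslr-seed */

        /* Lower half of the vmalloc area, rounded down to 2 MB. */
        uint64_t mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
        uint64_t offset = seed & mask;

        /* The top 16 bits are reserved for randomizing the linear region. */
        unsigned int memstart_seed = (unsigned int)(seed >> 48);

        printf("offset=0x%llx memstart_seed=0x%x\n",
               (unsigned long long)offset, memstart_seed);
        return 0;
}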
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
new file mode 100644 (file)
index 0000000..1ce90d8
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2014-2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sort.h>
+
+struct plt_entry {
+       /*
+        * A program that conforms to the AArch64 Procedure Call Standard
+        * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+        * IP1 (x17) may be inserted at any branch instruction that is
+        * exposed to a relocation that supports long branches. Since that
+        * is exactly what we are dealing with here, we are free to use x16
+        * as a scratch register in the PLT veneers.
+        */
+       __le32  mov0;   /* movn x16, #0x....                    */
+       __le32  mov1;   /* movk x16, #0x...., lsl #16           */
+       __le32  mov2;   /* movk x16, #0x...., lsl #32           */
+       __le32  br;     /* br   x16                             */
+};
+
+u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+                         Elf64_Sym *sym)
+{
+       struct plt_entry *plt = (struct plt_entry *)mod->arch.plt->sh_addr;
+       int i = mod->arch.plt_num_entries;
+       u64 val = sym->st_value + rela->r_addend;
+
+       /*
+        * We only emit PLT entries against undefined (SHN_UNDEF) symbols,
+        * which are listed in the ELF symtab section, but without a type
+        * or a size.
+        * So, similar to how the module loader uses the Elf64_Sym::st_value
+        * field to store the resolved addresses of undefined symbols, let's
+        * borrow the Elf64_Sym::st_size field (whose value is never used by
+        * the module loader, even for symbols that are defined) to record
+        * the address of a symbol's associated PLT entry as we emit it for a
+        * zero addend relocation (which is the only kind we have to deal with
+        * in practice). This allows us to find duplicates without having to
+        * go through the table every time.
+        */
+       if (rela->r_addend == 0 && sym->st_size != 0) {
+               BUG_ON(sym->st_size < (u64)plt || sym->st_size >= (u64)&plt[i]);
+               return sym->st_size;
+       }
+
+       mod->arch.plt_num_entries++;
+       BUG_ON(mod->arch.plt_num_entries > mod->arch.plt_max_entries);
+
+       /*
+        * MOVK/MOVN/MOVZ opcode:
+        * +--------+------------+--------+-----------+-------------+---------+
+        * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
+        * +--------+------------+--------+-----------+-------------+---------+
+        *
+        * Rd     := 0x10 (x16)
+        * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
+        * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
+        * sf     := 1 (64-bit variant)
+        */
+       plt[i] = (struct plt_entry){
+               cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
+               cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
+               cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
+               cpu_to_le32(0xd61f0200)
+       };
+
+       if (rela->r_addend == 0)
+               sym->st_size = (u64)&plt[i];
+
+       return (u64)&plt[i];
+}
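
A small host-side sketch of the MOVN/MOVK packing performed above (the target address is a made-up example; the trailing br x16 word is the constant 0xd61f0200):

#include <stdint.h>
#include <stdio.h>

/* Pack the three mov instructions of a PLT veneer for 'val' (Rd = x16). */
static void emit_plt_movs(uint64_t val, uint32_t insn[3])
{
        insn[0] = (uint32_t)(0x92800010 | ((~val & 0xffff) << 5));        /* movn x16, #0x.... */
        insn[1] = (uint32_t)(0xf2a00010 | (((val >> 16) & 0xffff) << 5)); /* movk x16, #0x...., lsl #16 */
        insn[2] = (uint32_t)(0xf2c00010 | (((val >> 32) & 0xffff) << 5)); /* movk x16, #0x...., lsl #32 */
}

int main(void)
{
        uint32_t insn[3];

        emit_plt_movs(0xffff000008123456ULL, insn);     /* hypothetical target */
        printf("%08x %08x %08x d61f0200\n", insn[0], insn[1], insn[2]);
        return 0;
}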
+
+#define cmp_3way(a,b)  ((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rela(const void *a, const void *b)
+{
+       const Elf64_Rela *x = a, *y = b;
+       int i;
+
+       /* sort by type, symbol index and addend */
+       i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
+       if (i == 0)
+               i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
+       if (i == 0)
+               i = cmp_3way(x->r_addend, y->r_addend);
+       return i;
+}
+
+static bool duplicate_rel(const Elf64_Rela *rela, int num)
+{
+       /*
+        * Entries are sorted by type, symbol index and addend. That means
+        * that, if a duplicate entry exists, it must be in the preceding
+        * slot.
+        */
+       return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
+}
+
+static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
+{
+       unsigned int ret = 0;
+       Elf64_Sym *s;
+       int i;
+
+       for (i = 0; i < num; i++) {
+               switch (ELF64_R_TYPE(rela[i].r_info)) {
+               case R_AARCH64_JUMP26:
+               case R_AARCH64_CALL26:
+                       /*
+                        * We only have to consider branch targets that resolve
+                        * to undefined symbols. This is not simply a heuristic;
+                        * it is a fundamental limitation, since the PLT itself
+                        * is part of the module, and needs to be within 128 MB
+                        * as well, so modules can never grow beyond that limit.
+                        */
+                       s = syms + ELF64_R_SYM(rela[i].r_info);
+                       if (s->st_shndx != SHN_UNDEF)
+                               break;
+
+                       /*
+                        * Jump relocations with non-zero addends against
+                        * undefined symbols are supported by the ELF spec, but
+                        * do not occur in practice (e.g., 'jump n bytes past
+                        * the entry point of undefined function symbol f').
+                        * So we need to support them, but there is no need to
+                        * take them into consideration when trying to optimize
+                        * this code. So let's only check for duplicates when
+                        * the addend is zero: this allows us to record the PLT
+                        * entry address in the symbol table itself, rather than
+                        * having to search the list for duplicates each time we
+                        * emit one.
+                        */
+                       if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
+                               ret++;
+                       break;
+               }
+       }
+       return ret;
+}
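
The sort-then-check-predecessor dedup implemented by cmp_rela()/duplicate_rel() can be modelled in miniature like this (plain ints stand in for the (type, symbol index, addend) triples):

#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
        int x = *(const int *)a, y = *(const int *)b;
        return (x < y) ? -1 : (x > y);
}

int main(void)
{
        int rels[] = { 7, 3, 7, 1, 3, 7 };
        int i, n = sizeof(rels) / sizeof(rels[0]), unique = 0;

        /* After sorting, any duplicate entry sits in the preceding slot. */
        qsort(rels, n, sizeof(int), cmp_int);
        for (i = 0; i < n; i++)
                if (i == 0 || cmp_int(&rels[i], &rels[i - 1]) != 0)
                        unique++;       /* i.e. !duplicate_rel(rela, i) */

        printf("%d unique of %d\n", unique, n); /* prints "3 unique of 6" */
        return 0;
}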
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+                             char *secstrings, struct module *mod)
+{
+       unsigned long plt_max_entries = 0;
+       Elf64_Sym *syms = NULL;
+       int i;
+
+       /*
+        * Find the empty .plt section so we can expand it to store the PLT
+        * entries. Record the symtab address as well.
+        */
+       for (i = 0; i < ehdr->e_shnum; i++) {
+               if (strcmp(".plt", secstrings + sechdrs[i].sh_name) == 0)
+                       mod->arch.plt = sechdrs + i;
+               else if (sechdrs[i].sh_type == SHT_SYMTAB)
+                       syms = (Elf64_Sym *)sechdrs[i].sh_addr;
+       }
+
+       if (!mod->arch.plt) {
+               pr_err("%s: module PLT section missing\n", mod->name);
+               return -ENOEXEC;
+       }
+       if (!syms) {
+               pr_err("%s: module symtab section missing\n", mod->name);
+               return -ENOEXEC;
+       }
+
+       for (i = 0; i < ehdr->e_shnum; i++) {
+               Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
+               int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
+               Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;
+
+               if (sechdrs[i].sh_type != SHT_RELA)
+                       continue;
+
+               /* ignore relocations that operate on non-exec sections */
+               if (!(dstsec->sh_flags & SHF_EXECINSTR))
+                       continue;
+
+               /* sort by type, symbol index and addend */
+               sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
+
+               plt_max_entries += count_plts(syms, rels, numrels);
+       }
+
+       mod->arch.plt->sh_type = SHT_NOBITS;
+       mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+       mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
+       mod->arch.plt->sh_size = plt_max_entries * sizeof(struct plt_entry);
+       mod->arch.plt_num_entries = 0;
+       mod->arch.plt_max_entries = plt_max_entries;
+       return 0;
+}
index f4bc779e62e887547b7a17b7672487f0851e1479..7f316982ce00186262728518f3a03f7871fb7dd7 100644 (file)
 #include <asm/insn.h>
 #include <asm/sections.h>
 
-#define        AARCH64_INSN_IMM_MOVNZ          AARCH64_INSN_IMM_MAX
-#define        AARCH64_INSN_IMM_MOVK           AARCH64_INSN_IMM_16
-
 void *module_alloc(unsigned long size)
 {
        void *p;
 
-       p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+       p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+                               module_alloc_base + MODULES_VSIZE,
                                GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
                                NUMA_NO_NODE, __builtin_return_address(0));
 
+       if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+           !IS_ENABLED(CONFIG_KASAN))
+               /*
+                * KASAN can only deal with module allocations being served
+                * from the reserved module region, since the remainder of
+                * the vmalloc region is already backed by zero shadow pages,
+                * and punching holes into it is non-trivial. Since the module
+                * region is not randomized when KASAN is enabled, it is even
+                * less likely that the module region gets exhausted, so we
+                * can simply omit this fallback in that case.
+                */
+               p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
+                               VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+                               NUMA_NO_NODE, __builtin_return_address(0));
+
        if (p && (kasan_module_alloc(p, size) < 0)) {
                vfree(p);
                return NULL;
@@ -75,15 +88,18 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
 
 static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
 {
-       u64 imm_mask = (1 << len) - 1;
        s64 sval = do_reloc(op, place, val);
 
        switch (len) {
        case 16:
                *(s16 *)place = sval;
+               if (sval < S16_MIN || sval > U16_MAX)
+                       return -ERANGE;
                break;
        case 32:
                *(s32 *)place = sval;
+               if (sval < S32_MIN || sval > U32_MAX)
+                       return -ERANGE;
                break;
        case 64:
                *(s64 *)place = sval;
@@ -92,34 +108,23 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
                pr_err("Invalid length (%d) for data relocation\n", len);
                return 0;
        }
-
-       /*
-        * Extract the upper value bits (including the sign bit) and
-        * shift them to bit 0.
-        */
-       sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
-
-       /*
-        * Overflow has occurred if the value is not representable in
-        * len bits (i.e the bottom len bits are not sign-extended and
-        * the top bits are not all zero).
-        */
-       if ((u64)(sval + 1) > 2)
-               return -ERANGE;
-
        return 0;
 }
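
The widened bounds above intentionally accept any value that is representable under either the signed or the unsigned interpretation of the field; a quick sketch of the 16-bit case:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the len == 16 check in reloc_data(): accept [S16_MIN, U16_MAX]. */
static int fits16(int64_t sval)
{
        return sval >= INT16_MIN && sval <= UINT16_MAX;
}

int main(void)
{
        printf("%d %d %d\n",
               fits16(-32768),  /* 1: S16_MIN, valid as a signed field */
               fits16(65535),   /* 1: U16_MAX, valid as an unsigned field */
               fits16(65536));  /* 0: overflows either interpretation */
        return 0;
}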
 
+enum aarch64_insn_movw_imm_type {
+       AARCH64_INSN_IMM_MOVNZ,
+       AARCH64_INSN_IMM_MOVKZ,
+};
+
 static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
-                          int lsb, enum aarch64_insn_imm_type imm_type)
+                          int lsb, enum aarch64_insn_movw_imm_type imm_type)
 {
-       u64 imm, limit = 0;
+       u64 imm;
        s64 sval;
        u32 insn = le32_to_cpu(*(u32 *)place);
 
        sval = do_reloc(op, place, val);
-       sval >>= lsb;
-       imm = sval & 0xffff;
+       imm = sval >> lsb;
 
        if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
                /*
@@ -128,7 +133,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
                 * immediate is less than zero.
                 */
                insn &= ~(3 << 29);
-               if ((s64)imm >= 0) {
+               if (sval >= 0) {
                        /* >=0: Set the instruction to MOVZ (opcode 10b). */
                        insn |= 2 << 29;
                } else {
@@ -140,29 +145,13 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
                         */
                        imm = ~imm;
                }
-               imm_type = AARCH64_INSN_IMM_MOVK;
        }
 
        /* Update the instruction with the new encoding. */
-       insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+       insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
        *(u32 *)place = cpu_to_le32(insn);
 
-       /* Shift out the immediate field. */
-       sval >>= 16;
-
-       /*
-        * For unsigned immediates, the overflow check is straightforward.
-        * For signed immediates, the sign bit is actually the bit past the
-        * most significant bit of the field.
-        * The AARCH64_INSN_IMM_16 immediate type is unsigned.
-        */
-       if (imm_type != AARCH64_INSN_IMM_16) {
-               sval++;
-               limit++;
-       }
-
-       /* Check the upper bits depending on the sign of the immediate. */
-       if ((u64)sval > limit)
+       if (imm > U16_MAX)
                return -ERANGE;
 
        return 0;
@@ -267,25 +256,25 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-                                             AARCH64_INSN_IMM_16);
+                                             AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-                                             AARCH64_INSN_IMM_16);
+                                             AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-                                             AARCH64_INSN_IMM_16);
+                                             AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
-                                             AARCH64_INSN_IMM_16);
+                                             AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_SABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
@@ -302,7 +291,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                case R_AARCH64_MOVW_PREL_G0_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-                                             AARCH64_INSN_IMM_MOVK);
+                                             AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
@@ -311,7 +300,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                case R_AARCH64_MOVW_PREL_G1_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-                                             AARCH64_INSN_IMM_MOVK);
+                                             AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
@@ -320,7 +309,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                case R_AARCH64_MOVW_PREL_G2_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-                                             AARCH64_INSN_IMM_MOVK);
+                                             AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
@@ -388,6 +377,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                case R_AARCH64_CALL26:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                                             AARCH64_INSN_IMM_26);
+
+                       if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+                           ovf == -ERANGE) {
+                               val = module_emit_plt_entry(me, &rel[i], sym);
+                               ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
+                                                    26, AARCH64_INSN_IMM_26);
+                       }
                        break;
 
                default:
diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
new file mode 100644 (file)
index 0000000..8949f6c
--- /dev/null
@@ -0,0 +1,3 @@
+SECTIONS {
+       .plt (NOLOAD) : { BYTE(0) }
+}
index 3aa74830cc69af0053efb77c72e9c26ae3bd6c02..ff4665462a025d4ec2655ca30d49732a63194e53 100644 (file)
@@ -164,8 +164,11 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
        frame.fp = regs->regs[29];
        frame.sp = regs->sp;
        frame.pc = regs->pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       frame.graph = current->curr_ret_stack;
+#endif
 
-       walk_stackframe(&frame, callchain_trace, entry);
+       walk_stackframe(current, &frame, callchain_trace, entry);
 }
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
index f75b540bc3b4b0daae4a8773cd0eddf1a86a90aa..80624829db613961b7a088ce18d8591361b448c7 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/notifier.h>
 #include <trace/events/power.h>
 
+#include <asm/alternative.h>
 #include <asm/compat.h>
 #include <asm/cacheflush.h>
 #include <asm/fpsimd.h>
@@ -280,6 +281,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
+               if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+                   cpus_have_cap(ARM64_HAS_UAO))
+                       childregs->pstate |= PSR_UAO_BIT;
                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
        }
@@ -308,6 +312,17 @@ static void tls_thread_switch(struct task_struct *next)
        : : "r" (tpidr), "r" (tpidrro));
 }
 
+/* Restore the UAO state depending on next's addr_limit */
+static void uao_thread_switch(struct task_struct *next)
+{
+       if (IS_ENABLED(CONFIG_ARM64_UAO)) {
+               if (task_thread_info(next)->addr_limit == KERNEL_DS)
+                       asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
+               else
+                       asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
+       }
+}
+
 /*
  * Thread switching.
  */
@@ -320,6 +335,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
        tls_thread_switch(next);
        hw_breakpoint_thread_switch(next);
        contextidr_thread_switch(next);
+       uao_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
@@ -344,11 +360,14 @@ unsigned long get_wchan(struct task_struct *p)
        frame.fp = thread_saved_fp(p);
        frame.sp = thread_saved_sp(p);
        frame.pc = thread_saved_pc(p);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       frame.graph = p->curr_ret_stack;
+#endif
        stack_page = (unsigned long)task_stack_page(p);
        do {
                if (frame.sp < stack_page ||
                    frame.sp >= stack_page + THREAD_SIZE ||
-                   unwind_frame(&frame))
+                   unwind_frame(p, &frame))
                        return 0;
                if (!in_sched_functions(frame.pc))
                        return frame.pc;
index 6c4fd2810ecb35b3e648db18923d21f19a50f422..1718706fde83604f78d81d850bf8827705338f1a 100644 (file)
@@ -43,8 +43,11 @@ void *return_address(unsigned int level)
        frame.fp = (unsigned long)__builtin_frame_address(0);
        frame.sp = current_stack_pointer;
        frame.pc = (unsigned long)return_address; /* dummy */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       frame.graph = current->curr_ret_stack;
+#endif
 
-       walk_stackframe(&frame, save_return_addr, &data);
+       walk_stackframe(current, &frame, save_return_addr, &data);
 
        if (!data.level)
                return data.addr;
index 8119479147db147c33800f76aa0d07c6072e8559..42371f69def3a3afc68a7f1a28d0e819e83c7837 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/memblock.h>
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
 
 phys_addr_t __fdt_pointer __initdata;
 
@@ -313,6 +314,12 @@ void __init setup_arch(char **cmdline_p)
         */
        local_async_enable();
 
+       /*
+        * TTBR0 is only used for the identity mapping at this stage. Make it
+        * point to zero page to avoid speculatively fetching new entries.
+        */
+       cpu_uninstall_idmap();
+
        efi_init();
        arm64_memblock_init();
 
@@ -381,3 +388,32 @@ static int __init topology_init(void)
        return 0;
 }
 subsys_initcall(topology_init);
+
+/*
+ * Dump out kernel offset information on panic.
+ */
+static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
+                             void *p)
+{
+       u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
+
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
+               pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
+                        kaslr_offset, KIMAGE_VADDR);
+       } else {
+               pr_emerg("Kernel Offset: disabled\n");
+       }
+       return 0;
+}
+
+static struct notifier_block kernel_offset_notifier = {
+       .notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &kernel_offset_notifier);
+       return 0;
+}
+__initcall(register_kernel_offset_dumper);
index f586f7c875e29295b6094efd4967e26cb6d01a99..e33fe33876ab3804f2c6dcd6c5458e576596ef24 100644 (file)
@@ -173,6 +173,9 @@ ENTRY(cpu_resume)
        /* load physical address of identity map page table in x1 */
        adrp    x1, idmap_pg_dir
        mov     sp, x2
+       /* save thread_info */
+       and     x2, x2, #~(THREAD_SIZE - 1)
+       msr     sp_el0, x2
        /*
         * cpu_do_resume expects x0 to contain context physical address
         * pointer and x1 to contain physical address of 1:1 page tables
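
The masking above (also used in __secondary_switched earlier in this series) recovers the thread_info address from any stack pointer within the stack, because the kernel stack is THREAD_SIZE aligned and thread_info sits at its base in this kernel version; a quick sketch with an assumed THREAD_SIZE and address:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 0x4000UL

int main(void)
{
        uint64_t sp = 0xffff800008123f40ULL;    /* hypothetical kernel sp */

        /* thread_info lives at the base of the THREAD_SIZE-aligned stack. */
        uint64_t thread_info = sp & ~(THREAD_SIZE - 1);

        printf("thread_info=0x%llx\n", (unsigned long long)thread_info);
        return 0;
}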
index b1adc51b2c2e7682212554ba8276b5e7c25fbff5..24cb4f800033bc2b9d5ad49144f915ca4506e6dc 100644 (file)
@@ -70,6 +70,7 @@ enum ipi_msg_type {
        IPI_CPU_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
+       IPI_WAKEUP
 };
 
 /*
@@ -149,9 +150,7 @@ asmlinkage void secondary_start_kernel(void)
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
-       cpu_set_reserved_ttbr0();
-       local_flush_tlb_all();
-       cpu_set_default_tcr_t0sz();
+       cpu_uninstall_idmap();
 
        preempt_disable();
        trace_hardirqs_off();
@@ -445,6 +444,17 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
        /* map the logical cpu id to cpu MPIDR */
        cpu_logical_map(cpu_count) = hwid;
 
+       /*
+        * Set up the ACPI parking protocol cpu entries
+        * while initializing the cpu_logical_map to
+        * avoid parsing MADT entries multiple times for
+        * nothing (i.e. a valid cpu_logical_map entry should
+        * contain a valid parking protocol data set to
+        * initialize the cpu if the parking protocol is
+        * the only available enable method).
+        */
+       acpi_set_mailbox_entry(cpu_count, processor);
+
        cpu_count++;
 }
 
@@ -627,6 +637,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
+       S(IPI_WAKEUP, "CPU wake-up interrupts"),
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
@@ -670,6 +681,13 @@ void arch_send_call_function_single_ipi(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+       smp_cross_call(mask, IPI_WAKEUP);
+}
+#endif
+
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -747,6 +765,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                break;
 #endif
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+       case IPI_WAKEUP:
+               WARN_ONCE(!acpi_parking_protocol_valid(cpu),
+                         "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
+                         cpu);
+               break;
+#endif
+
        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
index ccb6078ed9f20fb55132deb48504df3a3134a784..cfd46c227c8cbd7c57c9b88c1d6189404f0bdcfa 100644 (file)
  */
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <linux/ftrace.h>
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 
+#include <asm/irq.h>
 #include <asm/stacktrace.h>
 
 /*
  *     ldp     x29, x30, [sp]
  *     add     sp, sp, #0x10
  */
-int notrace unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
        unsigned long high, low;
        unsigned long fp = frame->fp;
+       unsigned long irq_stack_ptr;
+
+       /*
+        * Switching between stacks is only valid when tracing the current
+        * task in a non-preemptible context.
+        */
+       if (tsk == current && !preemptible())
+               irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+       else
+               irq_stack_ptr = 0;
 
        low  = frame->sp;
-       high = ALIGN(low, THREAD_SIZE);
+       /* irq stacks are not THREAD_SIZE aligned */
+       if (on_irq_stack(frame->sp, raw_smp_processor_id()))
+               high = irq_stack_ptr;
+       else
+               high = ALIGN(low, THREAD_SIZE) - 0x20;
 
-       if (fp < low || fp > high - 0x18 || fp & 0xf)
+       if (fp < low || fp > high || fp & 0xf)
                return -EINVAL;
 
        frame->sp = fp + 0x10;
        frame->fp = *(unsigned long *)(fp);
        frame->pc = *(unsigned long *)(fp + 8);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       if (tsk && tsk->ret_stack &&
+                       (frame->pc == (unsigned long)return_to_handler)) {
+               /*
+                * This is a case where function graph tracer has
+                * modified a return address (LR) in a stack frame
+                * to hook a function return.
+                * So replace it to an original value.
+                */
+               frame->pc = tsk->ret_stack[frame->graph--].ret;
+       }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+       /*
+        * Check whether we are about to walk through from the interrupt
+        * stack to the task stack.
+        * If we reach the end of the stack, and it's an interrupt stack,
+        * unpack the dummy frame to find the original elr.
+        *
+        * Check that the frame->fp we read from the bottom of the irq_stack,
+        * and the original task stack pointer, are both in current->stack.
+        */
+       if (frame->sp == irq_stack_ptr) {
+               struct pt_regs *irq_args;
+               unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+
+               if (object_is_on_stack((void *)orig_sp) &&
+                  object_is_on_stack((void *)frame->fp)) {
+                       frame->sp = orig_sp;
+
+                       /* orig_sp is the saved pt_regs, find the elr */
+                       irq_args = (struct pt_regs *)orig_sp;
+                       frame->pc = irq_args->pc;
+               } else {
+                       /*
+                        * This frame has a non-standard format, and we
+                        * didn't fix it because the data looked wrong.
+                        * Refuse to output this frame.
+                        */
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
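
The task-stack bounds check near the top of unwind_frame() can be exercised in isolation like this (THREAD_SIZE and the addresses are illustrative, and only the task-stack branch is modelled, not the irq-stack one):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE     0x4000UL
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

/* A task-stack frame pointer is plausible if it lies between the current
 * sp and the aligned stack top less 0x20, and is 16-byte aligned. */
static int frame_ok(uint64_t fp, uint64_t sp)
{
        uint64_t low = sp;
        uint64_t high = ALIGN_UP(low, THREAD_SIZE) - 0x20;

        return fp >= low && fp <= high && !(fp & 0xf);
}

int main(void)
{
        printf("%d\n", frame_ok(0x7f10, 0x7f00));       /* 1: aligned, in range */
        printf("%d\n", frame_ok(0x7f18, 0x7f00));       /* 0: not 16-byte aligned */
        return 0;
}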
 
-void notrace walk_stackframe(struct stackframe *frame,
+void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                     int (*fn)(struct stackframe *, void *), void *data)
 {
        while (1) {
@@ -61,7 +120,7 @@ void notrace walk_stackframe(struct stackframe *frame,
 
                if (fn(frame, data))
                        break;
-               ret = unwind_frame(frame);
+               ret = unwind_frame(tsk, frame);
                if (ret < 0)
                        break;
        }
@@ -112,8 +171,11 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
                frame.sp = current_stack_pointer;
                frame.pc = (unsigned long)save_stack_trace_tsk;
        }
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       frame.graph = tsk->curr_ret_stack;
+#endif
 
-       walk_stackframe(&frame, save_trace, &data);
+       walk_stackframe(tsk, &frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
index 1095aa483a1c28e5387b23895c14d7a1746268a3..66055392f445ef47a7fb3749ca6024df1ea185c9 100644 (file)
@@ -60,7 +60,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
  */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-       struct mm_struct *mm = current->active_mm;
        int ret;
        unsigned long flags;
 
@@ -87,22 +86,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        ret = __cpu_suspend_enter(arg, fn);
        if (ret == 0) {
                /*
-                * We are resuming from reset with TTBR0_EL1 set to the
-                * idmap to enable the MMU; set the TTBR0 to the reserved
-                * page tables to prevent speculative TLB allocations, flush
-                * the local tlb and set the default tcr_el1.t0sz so that
-                * the TTBR0 address space set-up is properly restored.
-                * If the current active_mm != &init_mm we entered cpu_suspend
-                * with mappings in TTBR0 that must be restored, so we switch
-                * them back to complete the address space configuration
-                * restoration before returning.
+                * We are resuming from reset with the idmap active in TTBR0_EL1.
+                * We must uninstall the idmap and restore the expected MMU
+                * state before we can possibly return to userspace.
                 */
-               cpu_set_reserved_ttbr0();
-               local_flush_tlb_all();
-               cpu_set_default_tcr_t0sz();
-
-               if (mm != &init_mm)
-                       cpu_switch_mm(mm->pgd, mm);
+               cpu_uninstall_idmap();
 
                /*
                 * Restore per-cpu offset before any kernel
index 13339b6ffc1a07839103fca328f1eccd6d185c12..59779699a1a40ef3a1940aa0d878c16164ea5398 100644 (file)
@@ -52,8 +52,11 @@ unsigned long profile_pc(struct pt_regs *regs)
        frame.fp = regs->regs[29];
        frame.sp = regs->sp;
        frame.pc = regs->pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       frame.graph = -1; /* no task info */
+#endif
        do {
-               int ret = unwind_frame(&frame);
+               int ret = unwind_frame(NULL, &frame);
                if (ret < 0)
                        return 0;
        } while (in_lock_functions(frame.pc));
index e9b9b53643936a121e8c73db99373d7e7cab9b48..c5392081b49ba4ac4f48d9a0782442ac888ed222 100644 (file)
@@ -146,17 +146,24 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
        struct stackframe frame;
+       unsigned long irq_stack_ptr;
+       int skip;
+
+       /*
+        * Switching between stacks is only valid when tracing the current
+        * task in a non-preemptible context.
+        */
+       if (tsk == current && !preemptible())
+               irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+       else
+               irq_stack_ptr = 0;
 
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
        if (!tsk)
                tsk = current;
 
-       if (regs) {
-               frame.fp = regs->regs[29];
-               frame.sp = regs->sp;
-               frame.pc = regs->pc;
-       } else if (tsk == current) {
+       if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_stack_pointer;
                frame.pc = (unsigned long)dump_backtrace;
@@ -168,21 +175,49 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
                frame.sp = thread_saved_sp(tsk);
                frame.pc = thread_saved_pc(tsk);
        }
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       frame.graph = tsk->curr_ret_stack;
+#endif
 
-       pr_emerg("Call trace:\n");
+       skip = !!regs;
+       printk("Call trace:\n");
        while (1) {
                unsigned long where = frame.pc;
                unsigned long stack;
                int ret;
 
-               dump_backtrace_entry(where);
-               ret = unwind_frame(&frame);
+               /* skip until specified stack frame */
+               if (!skip) {
+                       dump_backtrace_entry(where);
+               } else if (frame.fp == regs->regs[29]) {
+                       skip = 0;
+                       /*
+                        * Mostly, this is the case where this function is
+                        * called in panic/abort. As the exception handler's
+                        * stack frame does not contain the corresponding pc
+                        * at which an exception has taken place, use regs->pc
+                        * instead.
+                        */
+                       dump_backtrace_entry(regs->pc);
+               }
+               ret = unwind_frame(tsk, &frame);
                if (ret < 0)
                        break;
                stack = frame.sp;
-               if (in_exception_text(where))
+               if (in_exception_text(where)) {
+                       /*
+                        * If we switched to the irq_stack before calling this
+                        * exception handler, then the pt_regs will be on the
+                        * task stack. The easiest way to tell is if the large
+                        * pt_regs would overlap with the end of the irq_stack.
+                        */
+                       if (stack < irq_stack_ptr &&
+                           (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
+                               stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+
                        dump_mem("", "Exception stack", stack,
                                 stack + sizeof(struct pt_regs), false);
+               }
        }
 }
 
@@ -456,22 +491,22 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 
 void __pte_error(const char *file, int line, unsigned long val)
 {
-       pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
+       pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
 }
 
 void __pmd_error(const char *file, int line, unsigned long val)
 {
-       pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
+       pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
 }
 
 void __pud_error(const char *file, int line, unsigned long val)
 {
-       pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
+       pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
 }
 
 void __pgd_error(const char *file, int line, unsigned long val)
 {
-       pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
+       pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
 /* GENERIC_BUG traps */
index 60c1db54b41a251b4d006c17a696aa9d6837067b..82379a70ef03ff4296cb8d7fc04226a3d68e1392 100644 (file)
@@ -21,9 +21,8 @@
 #include <linux/const.h>
 #include <asm/page.h>
 
-       __PAGE_ALIGNED_DATA
-
        .globl vdso_start, vdso_end
+       .section .rodata
        .balign PAGE_SIZE
 vdso_start:
        .incbin "arch/arm64/kernel/vdso/vdso.so"
index 71426a78db123d13e98acf8659d65155ff342a06..e3f6cd740ea346bd887f01e2b46a558d8075d966 100644 (file)
@@ -87,15 +87,16 @@ SECTIONS
                EXIT_CALL
                *(.discard)
                *(.discard.*)
+               *(.interp .dynamic)
        }
 
-       . = PAGE_OFFSET + TEXT_OFFSET;
+       . = KIMAGE_VADDR + TEXT_OFFSET;
 
        .head.text : {
                _text = .;
                HEAD_TEXT
        }
-       ALIGN_DEBUG_RO
+       ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                        __exception_text_start = .;
@@ -113,14 +114,12 @@ SECTIONS
                *(.got)                 /* Global offset table          */
        }
 
-       ALIGN_DEBUG_RO
        RO_DATA(PAGE_SIZE)
        EXCEPTION_TABLE(8)
        NOTES
-       ALIGN_DEBUG_RO
-       _etext = .;                     /* End of text and rodata section */
 
        ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+       _etext = .;                     /* End of text and rodata section */
        __init_begin = .;
 
        INIT_TEXT_SECTION(8)
@@ -128,7 +127,6 @@ SECTIONS
                ARM_EXIT_KEEP(EXIT_TEXT)
        }
 
-       ALIGN_DEBUG_RO_MIN(16)
        .init.data : {
                INIT_DATA
                INIT_SETUP(16)
@@ -143,9 +141,6 @@ SECTIONS
 
        PERCPU_SECTION(L1_CACHE_BYTES)
 
-       . = ALIGN(PAGE_SIZE);
-       __init_end = .;
-
        . = ALIGN(4);
        .altinstructions : {
                __alt_instructions = .;
@@ -155,8 +150,25 @@ SECTIONS
        .altinstr_replacement : {
                *(.altinstr_replacement)
        }
+       .rela : ALIGN(8) {
+               __reloc_start = .;
+               *(.rela .rela*)
+               __reloc_end = .;
+       }
+       .dynsym : ALIGN(8) {
+               __dynsym_start = .;
+               *(.dynsym)
+       }
+       .dynstr : {
+               *(.dynstr)
+       }
+       .hash : {
+               *(.hash)
+       }
 
        . = ALIGN(PAGE_SIZE);
+       __init_end = .;
+
        _data = .;
        _sdata = .;
        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
@@ -190,4 +202,4 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
  */
-ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
index 86c289832272d71ba48786414bb6e4ecb9b9cb14..309e3479dc2c48fb47ca28a44b81e9d06b73e7bf 100644 (file)
@@ -923,7 +923,7 @@ __hyp_panic_str:
        .align  2
 
 /*
- * u64 kvm_call_hyp(void *hypfn, ...);
+ * u64 __kvm_call_hyp(void *hypfn, ...);
  *
 * This is not really a variadic function in the classic C way, and care must
  * be taken when calling this to ensure parameters are passed in registers
@@ -940,10 +940,10 @@ __hyp_panic_str:
  * used to implement __hyp_get_vectors in the same way as in
  * arch/arm64/kernel/hyp_stub.S.
  */
-ENTRY(kvm_call_hyp)
+ENTRY(__kvm_call_hyp)
        hvc     #0
        ret
-ENDPROC(kvm_call_hyp)
+ENDPROC(__kvm_call_hyp)
 
 .macro invalid_vector  label, target
        .align  2
index 1a811ecf71da8a8032a1e8cda8cf686f8fc71189..c86b7909ef312009028c46ba83b375b544d9ae84 100644 (file)
@@ -4,15 +4,16 @@ lib-y         := bitops.o clear_user.o delay.o copy_from_user.o       \
                   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o       \
                   strchr.o strrchr.o
 
-# Tell the compiler to treat all general purpose registers as
-# callee-saved, which allows for efficient runtime patching of the bl
-# instruction in the caller with an atomic instruction when supported by
-# the CPU. Result and argument registers are handled correctly, based on
-# the function prototype.
+# Tell the compiler to treat all general purpose registers (with the
+# exception of the IP registers, which are already handled by the caller
+# in case of a PLT) as callee-saved, which allows for efficient runtime
+# patching of the bl instruction in the caller with an atomic instruction
+# when supported by the CPU. Result and argument registers are handled
+# correctly, based on the function prototype.
 lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
 CFLAGS_atomic_ll_sc.o  := -fcall-used-x0 -ffixed-x1 -ffixed-x2         \
                   -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6          \
                   -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9           \
                   -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12   \
                   -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15   \
-                  -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
+                  -fcall-saved-x18
index a9723c71c52b20adf4e2efe69b2e43163cb2c878..5d1cad3ce6d601aa474ae9c9b8ef4c76a785912e 100644 (file)
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        mov     x2, x1                  // save the size for fixup return
        subs    x1, x1, #8
        b.mi    2f
 1:
-USER(9f, str   xzr, [x0], #8   )
+uao_user_alternative 9f, str, sttr, xzr, x0, 8
        subs    x1, x1, #8
        b.pl    1b
 2:     adds    x1, x1, #4
        b.mi    3f
-USER(9f, str   wzr, [x0], #4   )
+uao_user_alternative 9f, str, sttr, wzr, x0, 4
        sub     x1, x1, #4
 3:     adds    x1, x1, #2
        b.mi    4f
-USER(9f, strh  wzr, [x0], #2   )
+uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
        sub     x1, x1, #2
 4:     adds    x1, x1, #1
        b.mi    5f
-USER(9f, strb  wzr, [x0]       )
+uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:     mov     x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        ret
 ENDPROC(__clear_user)
index 4699cd74f87e4af7bf69da8ce48a88a7f4f69b74..17e8306dca294ecf37fc27355956eb6bcfc5d92e 100644 (file)
@@ -34,7 +34,7 @@
  */
 
        .macro ldrb1 ptr, regB, val
-       USER(9998f, ldrb  \ptr, [\regB], \val)
+       uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
        .endm
 
        .macro strb1 ptr, regB, val
@@ -42,7 +42,7 @@
        .endm
 
        .macro ldrh1 ptr, regB, val
-       USER(9998f, ldrh  \ptr, [\regB], \val)
+       uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
        .endm
 
        .macro strh1 ptr, regB, val
@@ -50,7 +50,7 @@
        .endm
 
        .macro ldr1 ptr, regB, val
-       USER(9998f, ldr \ptr, [\regB], \val)
+       uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
        .endm
 
        .macro str1 ptr, regB, val
@@ -58,7 +58,7 @@
        .endm
 
        .macro ldp1 ptr, regB, regC, val
-       USER(9998f, ldp \ptr, \regB, [\regC], \val)
+       uao_ldp 9998f, \ptr, \regB, \regC, \val
        .endm
 
        .macro stp1 ptr, regB, regC, val
 
 end    .req    x5
 ENTRY(__copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        add     end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        mov     x0, #0                          // Nothing to copy
        ret
index 81c8fc93c100b7be7da17ebf96b1edeeb806671f..f7292dd08c840f27d39874fe7cc08aa89bdfb66d 100644 (file)
  *     x0 - bytes not copied
  */
        .macro ldrb1 ptr, regB, val
-       USER(9998f, ldrb  \ptr, [\regB], \val)
+       uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val
        .endm
 
        .macro strb1 ptr, regB, val
-       USER(9998f, strb \ptr, [\regB], \val)
+       uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
        .endm
 
        .macro ldrh1 ptr, regB, val
-       USER(9998f, ldrh  \ptr, [\regB], \val)
+       uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
        .endm
 
        .macro strh1 ptr, regB, val
-       USER(9998f, strh \ptr, [\regB], \val)
+       uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
        .endm
 
        .macro ldr1 ptr, regB, val
-       USER(9998f, ldr \ptr, [\regB], \val)
+       uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
        .endm
 
        .macro str1 ptr, regB, val
-       USER(9998f, str \ptr, [\regB], \val)
+       uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
        .endm
 
        .macro ldp1 ptr, regB, regC, val
-       USER(9998f, ldp \ptr, \regB, [\regC], \val)
+       uao_ldp 9998f, \ptr, \regB, \regC, \val
        .endm
 
        .macro stp1 ptr, regB, regC, val
-       USER(9998f, stp \ptr, \regB, [\regC], \val)
+       uao_stp 9998f, \ptr, \regB, \regC, \val
        .endm
 
 end    .req    x5
 ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        add     end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        mov     x0, #0
        ret
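
The pair instructions are the one case without an unprivileged encoding: there is no ldtr/sttr pair form, so the uao_ldp/uao_stp macros are assumed to split each pair into two single-register unprivileged accesses plus the post-increment. A C-level sketch of ldp1 under UAO (fault handling omitted; the helper is illustrative):

	static inline void uao_ldp_sketch(unsigned long *a, unsigned long *b,
					  const unsigned long __user **srcp)
	{
		const unsigned long __user *src = *srcp;

		asm volatile(
		"	ldtr	%0, [%2]\n"	/* first register of the pair  */
		"	ldtr	%1, [%2, #8]\n"	/* second register of the pair */
		: "=&r" (*a), "=&r" (*b)
		: "r" (src)
		: "memory");
		*srcp = src + 2;		/* the ", #16" post-increment  */
	}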
index 512b9a7b980e98bbed9a699107e936e4b1913dca..4c1e700840b6ced5a0b2f868bfb4f37dddc8abc0 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/const.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
 
 /*
  * Copy a page from src to dest (both are page aligned)
  *     x1 - src
  */
 ENTRY(copy_page)
-       /* Assume cache line size is 64 bytes. */
-       prfm    pldl1strm, [x1, #64]
-1:     ldp     x2, x3, [x1]
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+       nop
+       nop
+alternative_else
+       # Prefetch two cache lines ahead.
+       prfm    pldl1strm, [x1, #128]
+       prfm    pldl1strm, [x1, #256]
+alternative_endif
+
+       ldp     x2, x3, [x1]
        ldp     x4, x5, [x1, #16]
        ldp     x6, x7, [x1, #32]
        ldp     x8, x9, [x1, #48]
-       add     x1, x1, #64
-       prfm    pldl1strm, [x1, #64]
+       ldp     x10, x11, [x1, #64]
+       ldp     x12, x13, [x1, #80]
+       ldp     x14, x15, [x1, #96]
+       ldp     x16, x17, [x1, #112]
+
+       mov     x18, #(PAGE_SIZE - 128)
+       add     x1, x1, #128
+1:
+       subs    x18, x18, #128
+
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+       nop
+alternative_else
+       prfm    pldl1strm, [x1, #384]
+alternative_endif
+
        stnp    x2, x3, [x0]
+       ldp     x2, x3, [x1]
        stnp    x4, x5, [x0, #16]
+       ldp     x4, x5, [x1, #16]
        stnp    x6, x7, [x0, #32]
+       ldp     x6, x7, [x1, #32]
        stnp    x8, x9, [x0, #48]
-       add     x0, x0, #64
-       tst     x1, #(PAGE_SIZE - 1)
-       b.ne    1b
+       ldp     x8, x9, [x1, #48]
+       stnp    x10, x11, [x0, #64]
+       ldp     x10, x11, [x1, #64]
+       stnp    x12, x13, [x0, #80]
+       ldp     x12, x13, [x1, #80]
+       stnp    x14, x15, [x0, #96]
+       ldp     x14, x15, [x1, #96]
+       stnp    x16, x17, [x0, #112]
+       ldp     x16, x17, [x1, #112]
+
+       add     x0, x0, #128
+       add     x1, x1, #128
+
+       b.gt    1b
+
+       stnp    x2, x3, [x0]
+       stnp    x4, x5, [x0, #16]
+       stnp    x6, x7, [x0, #32]
+       stnp    x8, x9, [x0, #48]
+       stnp    x10, x11, [x0, #64]
+       stnp    x12, x13, [x0, #80]
+       stnp    x14, x15, [x0, #96]
+       stnp    x16, x17, [x0, #112]
+
        ret
 ENDPROC(copy_page)
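
The rewritten copy_page() is software-pipelined: sixteen registers hold a 128-byte chunk, and each loop iteration stores the previous chunk with non-temporal stnp while loading the next, with explicit prfm hints patched in only on cores flagged ARM64_HAS_NO_HW_PREFETCH (set for early ThunderX parts). An illustrative C rendering of the structure, assuming PAGE_SIZE == 4096 (this is a sketch, not the kernel implementation):

	void copy_page_sketch(unsigned char *dst, const unsigned char *src)
	{
		unsigned char chunk[128];	/* x2-x17 in the assembly */
		long remaining = PAGE_SIZE - 128;

		memcpy(chunk, src, 128);	/* prime the pipeline     */
		src += 128;
		for (; remaining > 0; remaining -= 128) {
			memcpy(dst, chunk, 128);	/* stnp of previous chunk */
			memcpy(chunk, src, 128);	/* ldp of next chunk      */
			dst += 128;
			src += 128;
		}
		memcpy(dst, chunk, 128);	/* drain the final chunk  */
	}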
index 7512bbbc07ac39dbe8c963745281f25c2d60efa4..21faae60f9887ecbbfccb7ba1fb918839d47a291 100644 (file)
@@ -37,7 +37,7 @@
        .endm
 
        .macro strb1 ptr, regB, val
-       USER(9998f, strb \ptr, [\regB], \val)
+       uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val
        .endm
 
        .macro ldrh1 ptr, regB, val
@@ -45,7 +45,7 @@
        .endm
 
        .macro strh1 ptr, regB, val
-       USER(9998f, strh \ptr, [\regB], \val)
+       uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
        .endm
 
        .macro ldr1 ptr, regB, val
@@ -53,7 +53,7 @@
        .endm
 
        .macro str1 ptr, regB, val
-       USER(9998f, str \ptr, [\regB], \val)
+       uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
        .endm
 
        .macro ldp1 ptr, regB, regC, val
        .endm
 
        .macro stp1 ptr, regB, regC, val
-       USER(9998f, stp \ptr, \regB, [\regC], \val)
+       uao_stp 9998f, \ptr, \regB, \regC, \val
        .endm
 
 end    .req    x5
 ENTRY(__copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        add     end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        mov     x0, #0
        ret
index cfa44a6adc0ad5ec29f78228196b7e834b65df40..6df07069a0253013e254dbb1206debaa939a3526 100644 (file)
@@ -81,25 +81,31 @@ ENDPROC(__flush_cache_user_range)
 /*
  *     __flush_dcache_area(kaddr, size)
  *
- *     Ensure that the data held in the page kaddr is written back to the
- *     page in question.
+ *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     are cleaned and invalidated to the PoC.
  *
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
 ENTRY(__flush_dcache_area)
-       dcache_line_size x2, x3
-       add     x1, x0, x1
-       sub     x3, x2, #1
-       bic     x0, x0, x3
-1:     dc      civac, x0                       // clean & invalidate D line / unified line
-       add     x0, x0, x2
-       cmp     x0, x1
-       b.lo    1b
-       dsb     sy
+       dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
 ENDPIPROC(__flush_dcache_area)
 
+/*
+ *     __clean_dcache_area_pou(kaddr, size)
+ *
+ *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     are cleaned to the PoU.
+ *
+ *     - kaddr   - kernel address
+ *     - size    - size in question
+ */
+ENTRY(__clean_dcache_area_pou)
+       dcache_by_line_op cvau, ish, x0, x1, x2, x3
+       ret
+ENDPROC(__clean_dcache_area_pou)
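
Both entry points now share the dcache_by_line_op assembler macro. Its assumed expansion, rendered in C for the "cvau, ish" case used by __clean_dcache_area_pou() (the assembly reads the line size from CTR_EL0; the helper names here are illustrative):

	static void clean_dcache_area_pou_sketch(void *kaddr, size_t size)
	{
		unsigned long line = dcache_line_size_sketch();	/* from CTR_EL0 */
		unsigned long addr = (unsigned long)kaddr & ~(line - 1);
		unsigned long end  = (unsigned long)kaddr + size;

		for (; addr < end; addr += line)
			asm volatile("dc cvau, %0" : : "r" (addr));
		asm volatile("dsb ish" : : : "memory");
	}

Cleaning to the PoU (rather than clean+invalidate to the PoC) is all that instruction-side coherency needs, which is why the flush.c hunks below switch their users over.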
+
 /*
  *     __inval_cache_range(start, end)
  *     - start   - start address of region
index e87f53ff5f583aeb47b3ec3187d229df380b2c50..7275628ba59f663489f6f9403d46ca8a5050c6f7 100644 (file)
@@ -187,7 +187,7 @@ switch_mm_fastpath:
 
 static int asids_init(void)
 {
-       int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4);
+       int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4);
 
        switch (fld) {
        default:
index 13bbc3be6f5ab31a24d6d0a03b8f368ea6923ed8..22e4cb4d6f538baa43f7071ad1729dec01216d23 100644 (file)
@@ -24,8 +24,9 @@
 
 void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+       struct page *page = virt_to_page(kto);
        copy_page(kto, kfrom);
-       __flush_dcache_area(kto, PAGE_SIZE);
+       flush_dcache_page(page);
 }
 EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
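
Replacing the unconditional __flush_dcache_area() with flush_dcache_page() makes the maintenance lazy: on arm64, flush_dcache_page() only marks the page, and the actual work happens in __sync_icache_dcache() if and when the page is mapped with execute permission. Sketch of that marking, matching arch/arm64/mm/flush.c of this series:

	void flush_dcache_page(struct page *page)
	{
		if (test_bit(PG_dcache_clean, &page->flags))
			clear_bit(PG_dcache_clean, &page->flags);
	}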
 
index 354144e3321843ec1e34993c5a0b37068053e1c9..a6e757cbab7785ed411e919b95c6d13caaf21726 100644 (file)
@@ -40,7 +40,7 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 static struct gen_pool *atomic_pool;
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
-static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
 
 static int __init early_coherent_pool(char *p)
 {
@@ -896,7 +896,7 @@ static int __iommu_attach_notifier(struct notifier_block *nb,
        return 0;
 }
 
-static int register_iommu_dma_ops_notifier(struct bus_type *bus)
+static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
 {
        struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
        int ret;
index 5a22a119a74c87b4b5b54e114701b3c6eed233e6..6be918478f855021fee88a50fefe1b6642252c81 100644 (file)
@@ -35,7 +35,9 @@ struct addr_marker {
 };
 
 enum address_markers_idx {
-       VMALLOC_START_NR = 0,
+       MODULES_START_NR = 0,
+       MODULES_END_NR,
+       VMALLOC_START_NR,
        VMALLOC_END_NR,
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
        VMEMMAP_START_NR,
@@ -45,12 +47,12 @@ enum address_markers_idx {
        FIXADDR_END_NR,
        PCI_START_NR,
        PCI_END_NR,
-       MODULES_START_NR,
-       MODUELS_END_NR,
        KERNEL_SPACE_NR,
 };
 
 static struct addr_marker address_markers[] = {
+       { MODULES_VADDR,        "Modules start" },
+       { MODULES_END,          "Modules end" },
        { VMALLOC_START,        "vmalloc() Area" },
        { VMALLOC_END,          "vmalloc() End" },
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -61,9 +63,7 @@ static struct addr_marker address_markers[] = {
        { FIXADDR_TOP,          "Fixmap end" },
        { PCI_IO_START,         "PCI I/O start" },
        { PCI_IO_END,           "PCI I/O end" },
-       { MODULES_VADDR,        "Modules start" },
-       { MODULES_END,          "Modules end" },
-       { PAGE_OFFSET,          "Kernel Mapping" },
+       { PAGE_OFFSET,          "Linear Mapping" },
        { -1,                   NULL },
 };
 
@@ -90,6 +90,11 @@ struct prot_bits {
 
 static const struct prot_bits pte_bits[] = {
        {
+               .mask   = PTE_VALID,
+               .val    = PTE_VALID,
+               .set    = " ",
+               .clear  = "F",
+       }, {
                .mask   = PTE_USER,
                .val    = PTE_USER,
                .set    = "USR",
index 79444279ba8c674316e34cfe0861c42021fa92f3..81acd4706878f85d8821f0ff924bff05adc31c97 100644 (file)
@@ -11,7 +11,7 @@ int fixup_exception(struct pt_regs *regs)
 
        fixup = search_exception_tables(instruction_pointer(regs));
        if (fixup)
-               regs->pc = fixup->fixup;
+               regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
 
        return fixup != NULL;
 }
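
The fixup address is now computed relative to the entry itself, which assumes the relative exception table layout introduced alongside this change (ARCH_HAS_RELATIVE_EXTABLE): 32-bit self-relative offsets instead of 64-bit absolute addresses, halving the table and keeping it valid wherever KASLR places the kernel. The entry layout this implies:

	struct exception_table_entry {
		int insn, fixup;	/* signed offsets from each field's own address */
	};

so regs->pc = (unsigned long)&fixup->fixup + fixup->fixup reconstructs the absolute landing pad.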
index 40f5522245a2b2ef02a88c51332994d33f4a40cd..9bb45de46085f656f9db556192a41fe1daff28cb 100644 (file)
@@ -242,6 +242,14 @@ out:
        return fault;
 }
 
+static inline int permission_fault(unsigned int esr)
+{
+       unsigned int ec       = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
+       unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
+
+       return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+}
+
 static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                                   struct pt_regs *regs)
 {
@@ -275,12 +283,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                mm_flags |= FAULT_FLAG_WRITE;
        }
 
-       /*
-        * PAN bit set implies the fault happened in kernel space, but not
-        * in the arch's user access functions.
-        */
-       if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
-               goto no_context;
+       if (permission_fault(esr) && (addr < USER_DS)) {
+               if (get_fs() == KERNEL_DS)
+                       die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
+
+               if (!search_exception_tables(regs->pc))
+                       die("Accessing user space memory outside uaccess.h routines", regs, esr);
+       }
 
        /*
         * As per x86, we may deadlock here. However, since the kernel only
@@ -611,3 +620,16 @@ void cpu_enable_pan(void *__unused)
        config_sctlr_el1(SCTLR_EL1_SPAN, 0);
 }
 #endif /* CONFIG_ARM64_PAN */
+
+#ifdef CONFIG_ARM64_UAO
+/*
+ * Kernel threads have fs=KERNEL_DS by default, and don't need to call
+ * set_fs(); devtmpfs in particular relies on this behaviour.
+ * We need to enable the feature at runtime (instead of adding it to
+ * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
+ */
+void cpu_enable_uao(void *__unused)
+{
+       asm(SET_PSTATE_UAO(1));
+}
+#endif /* CONFIG_ARM64_UAO */
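
cpu_enable_uao() only sets the PSTATE override for the boot-time default; after that the toggle lives in set_fs(). A hedged sketch, modelled on this series' asm/uaccess.h, of how the two cooperate (uaccess routines always emit the unprivileged forms on UAO-capable CPUs, and KERNEL_DS makes those forms act privileged):

	static inline void set_fs(mm_segment_t fs)
	{
		current_thread_info()->addr_limit = fs;

		/*
		 * Enable/disable UAO so that copy_to_user() etc can access
		 * kernel memory with the unprivileged instructions.
		 */
		if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
					CONFIG_ARM64_UAO));
	}

The new die() paths above turn the two misuse cases (user access with fs=KERNEL_DS, and user access outside the uaccess helpers) into clean diagnostics instead of silent PAN/UAO bypasses.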
index c26b804015e80c46e1380d0a1af7f8f439c55405..46649d6e6c5a5608caa84015d3ce4f09d3d47eee 100644 (file)
@@ -34,19 +34,24 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                __flush_icache_all();
 }
 
+static void sync_icache_aliases(void *kaddr, unsigned long len)
+{
+       unsigned long addr = (unsigned long)kaddr;
+
+       if (icache_is_aliasing()) {
+               __clean_dcache_area_pou(kaddr, len);
+               __flush_icache_all();
+       } else {
+               flush_icache_range(addr, addr + len);
+       }
+}
+
 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                                unsigned long uaddr, void *kaddr,
                                unsigned long len)
 {
-       if (vma->vm_flags & VM_EXEC) {
-               unsigned long addr = (unsigned long)kaddr;
-               if (icache_is_aliasing()) {
-                       __flush_dcache_area(kaddr, len);
-                       __flush_icache_all();
-               } else {
-                       flush_icache_range(addr, addr + len);
-               }
-       }
+       if (vma->vm_flags & VM_EXEC)
+               sync_icache_aliases(kaddr, len);
 }
 
 /*
@@ -74,13 +79,11 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
        if (!page_mapping(page))
                return;
 
-       if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
-               __flush_dcache_area(page_address(page),
-                               PAGE_SIZE << compound_order(page));
+       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+               sync_icache_aliases(page_address(page),
+                                   PAGE_SIZE << compound_order(page));
+       else if (icache_is_aivivt())
                __flush_icache_all();
-       } else if (icache_is_aivivt()) {
-               __flush_icache_all();
-       }
 }
 
 /*
index 383b03ff38f850a0b0a000ee4e6450fd6649c7db..da30529bb1f65c9e3d5408b2e28ab31bc2283211 100644 (file)
@@ -41,15 +41,273 @@ int pud_huge(pud_t pud)
 #endif
 }
 
+static int find_num_contig(struct mm_struct *mm, unsigned long addr,
+                          pte_t *ptep, pte_t pte, size_t *pgsize)
+{
+       pgd_t *pgd = pgd_offset(mm, addr);
+       pud_t *pud;
+       pmd_t *pmd;
+
+       *pgsize = PAGE_SIZE;
+       if (!pte_cont(pte))
+               return 1;
+       if (!pgd_present(*pgd)) {
+               VM_BUG_ON(!pgd_present(*pgd));
+               return 1;
+       }
+       pud = pud_offset(pgd, addr);
+       if (!pud_present(*pud)) {
+               VM_BUG_ON(!pud_present(*pud));
+               return 1;
+       }
+       pmd = pmd_offset(pud, addr);
+       if (!pmd_present(*pmd)) {
+               VM_BUG_ON(!pmd_present(*pmd));
+               return 1;
+       }
+       if ((pte_t *)pmd == ptep) {
+               *pgsize = PMD_SIZE;
+               return CONT_PMDS;
+       }
+       return CONT_PTES;
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                           pte_t *ptep, pte_t pte)
+{
+       size_t pgsize;
+       int i;
+       int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
+       unsigned long pfn;
+       pgprot_t hugeprot;
+
+       if (ncontig == 1) {
+               set_pte_at(mm, addr, ptep, pte);
+               return;
+       }
+
+       pfn = pte_pfn(pte);
+       hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+       for (i = 0; i < ncontig; i++) {
+               pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
+                        pte_val(pfn_pte(pfn, hugeprot)));
+               set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+               ptep++;
+               pfn += pgsize >> PAGE_SHIFT;
+               addr += pgsize;
+       }
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+                     unsigned long addr, unsigned long sz)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pte_t *pte = NULL;
+
+       pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
+       pgd = pgd_offset(mm, addr);
+       pud = pud_alloc(mm, pgd, addr);
+       if (!pud)
+               return NULL;
+
+       if (sz == PUD_SIZE) {
+               pte = (pte_t *)pud;
+       } else if (sz == (PAGE_SIZE * CONT_PTES)) {
+               pmd_t *pmd = pmd_alloc(mm, pud, addr);
+
+               WARN_ON(addr & (sz - 1));
+               /*
+                * Note that if this code were ever ported to the
+                * 32-bit arm platform then it will cause trouble in
+                * the case where CONFIG_HIGHPTE is set, since there
+                * will be no pte_unmap() to correspond with this
+                * pte_alloc_map().
+                */
+               pte = pte_alloc_map(mm, NULL, pmd, addr);
+       } else if (sz == PMD_SIZE) {
+               if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
+                   pud_none(*pud))
+                       pte = huge_pmd_share(mm, addr, pud);
+               else
+                       pte = (pte_t *)pmd_alloc(mm, pud, addr);
+       } else if (sz == (PMD_SIZE * CONT_PMDS)) {
+               pmd_t *pmd;
+
+               pmd = pmd_alloc(mm, pud, addr);
+               WARN_ON(addr & (sz - 1));
+               return (pte_t *)pmd;
+       }
+
+       pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
+              sz, pte, pte ? pte_val(*pte) : 0);
+       return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd = NULL;
+       pte_t *pte = NULL;
+
+       pgd = pgd_offset(mm, addr);
+       pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
+       if (!pgd_present(*pgd))
+               return NULL;
+       pud = pud_offset(pgd, addr);
+       if (!pud_present(*pud))
+               return NULL;
+
+       if (pud_huge(*pud))
+               return (pte_t *)pud;
+       pmd = pmd_offset(pud, addr);
+       if (!pmd_present(*pmd))
+               return NULL;
+
+       if (pte_cont(pmd_pte(*pmd))) {
+               pmd = pmd_offset(
+                       pud, (addr & CONT_PMD_MASK));
+               return (pte_t *)pmd;
+       }
+       if (pmd_huge(*pmd))
+               return (pte_t *)pmd;
+       pte = pte_offset_kernel(pmd, addr);
+       if (pte_present(*pte) && pte_cont(*pte)) {
+               pte = pte_offset_kernel(
+                       pmd, (addr & CONT_PTE_MASK));
+               return pte;
+       }
+       return NULL;
+}
+
+pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+                        struct page *page, int writable)
+{
+       size_t pagesize = huge_page_size(hstate_vma(vma));
+
+       if (pagesize == CONT_PTE_SIZE) {
+               entry = pte_mkcont(entry);
+       } else if (pagesize == CONT_PMD_SIZE) {
+               entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
+       } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
+               pr_warn("%s: unrecognized huge page size 0x%lx\n",
+                       __func__, pagesize);
+       }
+       return entry;
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                             unsigned long addr, pte_t *ptep)
+{
+       pte_t pte;
+
+       if (pte_cont(*ptep)) {
+               int ncontig, i;
+               size_t pgsize;
+               pte_t *cpte;
+               bool is_dirty = false;
+
+               cpte = huge_pte_offset(mm, addr);
+               ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
+               /* save the 1st pte to return */
+               pte = ptep_get_and_clear(mm, addr, cpte);
+               for (i = 1; i < ncontig; ++i) {
+                       /*
+                        * If HW_AFDBM is enabled, then the HW could
+                        * turn on the dirty bit for any of the page
+                        * in the set, so check them all.
+                        */
+                       ++cpte;
+                       if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
+                               is_dirty = true;
+               }
+               if (is_dirty)
+                       return pte_mkdirty(pte);
+               else
+                       return pte;
+       } else {
+               return ptep_get_and_clear(mm, addr, ptep);
+       }
+}
+
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                              unsigned long addr, pte_t *ptep,
+                              pte_t pte, int dirty)
+{
+       pte_t *cpte;
+
+       if (pte_cont(pte)) {
+               int ncontig, i, changed = 0;
+               size_t pgsize = 0;
+               unsigned long pfn = pte_pfn(pte);
+               /* Select all bits except the pfn */
+               pgprot_t hugeprot =
+                       __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
+                                pte_val(pte));
+
+               cpte = huge_pte_offset(vma->vm_mm, addr);
+               pfn = pte_pfn(*cpte);
+               ncontig = find_num_contig(vma->vm_mm, addr, cpte,
+                                         *cpte, &pgsize);
+               for (i = 0; i < ncontig; ++i, ++cpte) {
+                       changed = ptep_set_access_flags(vma, addr, cpte,
+                                                       pfn_pte(pfn,
+                                                               hugeprot),
+                                                       dirty);
+                       pfn += pgsize >> PAGE_SHIFT;
+               }
+               return changed;
+       } else {
+               return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+       }
+}
+
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                            unsigned long addr, pte_t *ptep)
+{
+       if (pte_cont(*ptep)) {
+               int ncontig, i;
+               pte_t *cpte;
+               size_t pgsize = 0;
+
+               cpte = huge_pte_offset(mm, addr);
+               ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
+               for (i = 0; i < ncontig; ++i, ++cpte)
+                       ptep_set_wrprotect(mm, addr, cpte);
+       } else {
+               ptep_set_wrprotect(mm, addr, ptep);
+       }
+}
+
+void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                          unsigned long addr, pte_t *ptep)
+{
+       if (pte_cont(*ptep)) {
+               int ncontig, i;
+               pte_t *cpte;
+               size_t pgsize = 0;
+
+               cpte = huge_pte_offset(vma->vm_mm, addr);
+               ncontig = find_num_contig(vma->vm_mm, addr, cpte,
+                                         *cpte, &pgsize);
+               for (i = 0; i < ncontig; ++i, ++cpte)
+                       ptep_clear_flush(vma, addr, cpte);
+       } else {
+               ptep_clear_flush(vma, addr, ptep);
+       }
+}
+
 static __init int setup_hugepagesz(char *opt)
 {
        unsigned long ps = memparse(opt, &opt);
+
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
-               pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
+               pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
                return 0;
        }
        return 1;
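
The new helpers above are built around the ARMv8 "contiguous" hint bit, which lets a run of adjacent PTEs or PMDs share a single TLB entry. Assumed geometry for a 4K translation granule (the constants live in asm/pgtable-hwdef.h and differ for 16K/64K granules):

	#define CONT_PTES	16			/* 16 adjacent 4K PTEs */
	#define CONT_PTE_SIZE	(CONT_PTES * PAGE_SIZE)	/* = 64K hugepage      */
	#define CONT_PMDS	16			/* 16 adjacent 2M PMDs */
	#define CONT_PMD_SIZE	(CONT_PMDS * PMD_SIZE)	/* = 32M hugepage      */

find_num_contig() decides which case a given pte falls into so that the set/clear/wrprotect/flush helpers iterate over the whole run rather than a single entry.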
index 4cb98aa8c27b2e89fd32783d45f283e8a152d80e..9db46dfb6afb987c8ee59324fdc28d4e62f4ca61 100644 (file)
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
 
+#include <asm/boot.h>
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 
 #include "mm.h"
 
-phys_addr_t memstart_addr __read_mostly = 0;
+/*
+ * We need to be able to catch inadvertent references to memstart_addr
+ * that occur (potentially in generic code) before arm64_memblock_init()
+ * executes, which assigns it its actual value. So use a default value
+ * that cannot be mistaken for a real physical address.
+ */
+s64 memstart_addr __read_mostly = -1;
 phys_addr_t arm64_dma_phys_limit __read_mostly;
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -58,8 +67,8 @@ static int __init early_initrd(char *p)
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);
 
-               initrd_start = (unsigned long)__va(start);
-               initrd_end = (unsigned long)__va(start + size);
+               initrd_start = start;
+               initrd_end = start + size;
        }
        return 0;
 }
@@ -71,7 +80,7 @@ early_param("initrd", early_initrd);
  * currently assumes that for memory starting above 4G, 32-bit devices will
  * use a DMA offset.
  */
-static phys_addr_t max_zone_dma_phys(void)
+static phys_addr_t __init max_zone_dma_phys(void)
 {
        phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
        return min(offset + (1ULL << 32), memblock_end_of_DRAM());
@@ -126,11 +135,11 @@ EXPORT_SYMBOL(pfn_valid);
 #endif
 
 #ifndef CONFIG_SPARSEMEM
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
 {
 }
 #else
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
 {
        struct memblock_region *reg;
 
@@ -159,7 +168,57 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-       memblock_enforce_memory_limit(memory_limit);
+       const s64 linear_region_size = -(s64)PAGE_OFFSET;
+
+       /*
+        * Ensure that the linear region takes up exactly half of the kernel
+        * virtual address space. This way, we can distinguish a linear address
+        * from a kernel/module/vmalloc address by testing a single bit.
+        */
+       BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
+
+       /*
+        * Select a suitable value for the base of physical memory.
+        */
+       memstart_addr = round_down(memblock_start_of_DRAM(),
+                                  ARM64_MEMSTART_ALIGN);
+
+       /*
+        * Remove the memory that we will not be able to cover with the
+        * linear mapping. Take care not to clip the kernel which may be
+        * high in memory.
+        */
+       memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
+                       ULLONG_MAX);
+       if (memblock_end_of_DRAM() > linear_region_size)
+               memblock_remove(0, memblock_end_of_DRAM() - linear_region_size);
+
+       /*
+        * Apply the memory limit if it was set. Since the kernel may be loaded
+        * high up in memory, add back the kernel region that must be accessible
+        * via the linear mapping.
+        */
+       if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+               memblock_enforce_memory_limit(memory_limit);
+               memblock_add(__pa(_text), (u64)(_end - _text));
+       }
+
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+               extern u16 memstart_offset_seed;
+               u64 range = linear_region_size -
+                           (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+
+               /*
+                * If the size of the linear region exceeds, by a sufficient
+                * margin, the size of the region that the available physical
+                * memory spans, randomize the linear region as well.
+                */
+               if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+                       range = range / ARM64_MEMSTART_ALIGN + 1;
+                       memstart_addr -= ARM64_MEMSTART_ALIGN *
+                                        ((range * memstart_offset_seed) >> 16);
+               }
+       }
 
        /*
         * Register the kernel text, kernel data, initrd, and initial
@@ -167,8 +226,13 @@ void __init arm64_memblock_init(void)
         */
        memblock_reserve(__pa(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (initrd_start)
-               memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
+       if (initrd_start) {
+               memblock_reserve(initrd_start, initrd_end - initrd_start);
+
+               /* the generic initrd code expects virtual addresses */
+               initrd_start = __phys_to_virt(initrd_start);
+               initrd_end = __phys_to_virt(initrd_end);
+       }
 #endif
 
        early_init_fdt_scan_reserved_mem();
@@ -302,35 +366,36 @@ void __init mem_init(void)
 #ifdef CONFIG_KASAN
                  "    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n"
 #endif
+                 "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
                  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
+                 "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+                 "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+                 "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n"
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
                  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
 #endif
                  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
                  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+                 "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 #ifdef CONFIG_KASAN
                  MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
 #endif
+                 MLM(MODULES_VADDR, MODULES_END),
                  MLG(VMALLOC_START, VMALLOC_END),
+                 MLK_ROUNDUP(__init_begin, __init_end),
+                 MLK_ROUNDUP(_text, _etext),
+                 MLK_ROUNDUP(_sdata, _edata),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  MLG(VMEMMAP_START,
                      VMEMMAP_START + VMEMMAP_SIZE),
-                 MLM((unsigned long)virt_to_page(PAGE_OFFSET),
+                 MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
                      (unsigned long)virt_to_page(high_memory)),
 #endif
                  MLK(FIXADDR_START, FIXADDR_TOP),
                  MLM(PCI_IO_START, PCI_IO_END),
-                 MLM(MODULES_VADDR, MODULES_END),
-                 MLM(PAGE_OFFSET, (unsigned long)high_memory),
-                 MLK_ROUNDUP(__init_begin, __init_end),
-                 MLK_ROUNDUP(_text, _etext),
-                 MLK_ROUNDUP(_sdata, _edata));
+                 MLM(__phys_to_virt(memblock_start_of_DRAM()),
+                     (unsigned long)high_memory));
 
 #undef MLK
 #undef MLM
@@ -358,9 +423,8 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-       fixup_init();
        free_initmem_default(0);
-       free_alternatives_memory();
+       fixup_init();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -381,3 +445,28 @@ static int __init keepinitrd_setup(char *__unused)
 
 __setup("keepinitrd", keepinitrd_setup);
 #endif
+
+/*
+ * Dump out memory limit information on panic.
+ */
+static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
+{
+       if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+               pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
+       } else {
+               pr_emerg("Memory Limit: none\n");
+       }
+       return 0;
+}
+
+static struct notifier_block mem_limit_notifier = {
+       .notifier_call = dump_mem_limit,
+};
+
+static int __init register_mem_limit_dumper(void)
+{
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &mem_limit_notifier);
+       return 0;
+}
+__initcall(register_mem_limit_dumper);
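
With memstart_addr now computed rather than assumed to be zero, the CONFIG_RANDOMIZE_BASE block above can slide physical memory around inside the linear region. A worked rendering of that arithmetic with illustrative numbers (assuming VA_BITS=48, so the linear region spans 1 << 47 bytes, and 16GB of DRAM):

	u64 slack = (1UL << 47) - (16UL << 30);	/* spare linear VA, ~128TB */
	u64 range = slack / ARM64_MEMSTART_ALIGN + 1;
	memstart_addr -= ARM64_MEMSTART_ALIGN *
			 ((range * memstart_offset_seed) >> 16);
	/* the 16-bit seed picks one of up to 65536 aligned placements */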
index cf038c7d9fa994c7d86e05920ffa8961aecc4ad8..757009daa9ede454abccbcab6b3a0421a355b49c 100644 (file)
 #include <linux/memblock.h>
 #include <linux/start_kernel.h>
 
+#include <asm/mmu_context.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
@@ -32,7 +35,7 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
        if (pmd_none(*pmd))
                pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
 
-       pte = pte_offset_kernel(pmd, addr);
+       pte = pte_offset_kimg(pmd, addr);
        do {
                next = addr + PAGE_SIZE;
                set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
@@ -50,7 +53,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
        if (pud_none(*pud))
                pud_populate(&init_mm, pud, kasan_zero_pmd);
 
-       pmd = pmd_offset(pud, addr);
+       pmd = pmd_offset_kimg(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                kasan_early_pte_populate(pmd, addr, next);
@@ -67,7 +70,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
        if (pgd_none(*pgd))
                pgd_populate(&init_mm, pgd, kasan_zero_pud);
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset_kimg(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                kasan_early_pmd_populate(pud, addr, next);
@@ -96,6 +99,21 @@ asmlinkage void __init kasan_early_init(void)
        kasan_map_early_shadow();
 }
 
+/*
+ * Copy the current shadow region into a new pgdir.
+ */
+void __init kasan_copy_shadow(pgd_t *pgdir)
+{
+       pgd_t *pgd, *pgd_new, *pgd_end;
+
+       pgd = pgd_offset_k(KASAN_SHADOW_START);
+       pgd_end = pgd_offset_k(KASAN_SHADOW_END);
+       pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+       do {
+               set_pgd(pgd_new, *pgd);
+       } while (pgd++, pgd_new++, pgd != pgd_end);
+}
+
 static void __init clear_pgds(unsigned long start,
                        unsigned long end)
 {
@@ -108,18 +126,18 @@ static void __init clear_pgds(unsigned long start,
                set_pgd(pgd_offset_k(start), __pgd(0));
 }
 
-static void __init cpu_set_ttbr1(unsigned long ttbr1)
-{
-       asm(
-       "       msr     ttbr1_el1, %0\n"
-       "       isb"
-       :
-       : "r" (ttbr1));
-}
-
 void __init kasan_init(void)
 {
+       u64 kimg_shadow_start, kimg_shadow_end;
+       u64 mod_shadow_start, mod_shadow_end;
        struct memblock_region *reg;
+       int i;
+
+       kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
+       kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
+
+       mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
+       mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
 
        /*
         * We are going to perform proper setup of shadow memory.
@@ -129,13 +147,33 @@ void __init kasan_init(void)
         * setup will be finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
-       cpu_set_ttbr1(__pa(tmp_pg_dir));
-       flush_tlb_all();
+       dsb(ishst);
+       cpu_replace_ttbr1(tmp_pg_dir);
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
+       vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
+                        pfn_to_nid(virt_to_pfn(_text)));
+
+       /*
+        * vmemmap_populate() has populated the shadow region that covers the
+        * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
+        * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
+        * kasan_populate_zero_shadow() from replacing the page table entries
+        * (PMD or PTE) at the edges of the shadow region for the kernel
+        * image.
+        */
+       kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
+       kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+
        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
-                       kasan_mem_to_shadow((void *)MODULES_VADDR));
+                                  (void *)mod_shadow_start);
+       kasan_populate_zero_shadow((void *)kimg_shadow_end,
+                                  kasan_mem_to_shadow((void *)PAGE_OFFSET));
+
+       if (kimg_shadow_start > mod_shadow_end)
+               kasan_populate_zero_shadow((void *)mod_shadow_end,
+                                          (void *)kimg_shadow_start);
 
        for_each_memblock(memory, reg) {
                void *start = (void *)__phys_to_virt(reg->base);
@@ -155,9 +193,16 @@ void __init kasan_init(void)
                                pfn_to_nid(virt_to_pfn(start)));
        }
 
+       /*
+        * KAsan may reuse the contents of kasan_zero_pte directly, so we
+        * should make sure that it maps the zero page read-only.
+        */
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               set_pte(&kasan_zero_pte[i],
+                       pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+
        memset(kasan_zero_page, 0, PAGE_SIZE);
-       cpu_set_ttbr1(__pa(swapper_pg_dir));
-       flush_tlb_all();
+       cpu_replace_ttbr1(swapper_pg_dir);
 
        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
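
A bare msr ttbr1_el1 plus TLB flush was not a safe way to swap the active page tables, hence the removal of cpu_set_ttbr1() in favour of cpu_replace_ttbr1(). The replacement is assumed (per this series' asm/mmu_context.h) to run the switch from the identity map so TTBR1 can be swapped with proper break-before-make:

	static inline void cpu_replace_ttbr1(pgd_t *pgd)
	{
		typedef void (ttbr_replace_func)(phys_addr_t);
		extern ttbr_replace_func idmap_cpu_replace_ttbr1;
		ttbr_replace_func *replace_phys;

		phys_addr_t pgd_phys = virt_to_phys(pgd);

		replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);

		cpu_install_idmap();	/* run from the identity mapping ... */
		replace_phys(pgd_phys);	/* ... while TTBR1 is being swapped  */
		cpu_uninstall_idmap();
	}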
index 116ad654dd593d18a07d68fd61875dd2de614222..41421c724fb9eb27a28ede211b0775af97b1fc32 100644 (file)
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 
+#include <asm/barrier.h>
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 
+u64 kimage_voffset __read_mostly;
+EXPORT_SYMBOL(kimage_voffset);
+
 /*
  * Empty_zero_page is a special page that is used for zero-initialized data
  * and COW.
  */
-struct page *empty_zero_page;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
 {
@@ -62,16 +71,30 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 }
 EXPORT_SYMBOL(phys_mem_access_prot);
 
-static void __init *early_alloc(unsigned long sz)
+static phys_addr_t __init early_pgtable_alloc(void)
 {
        phys_addr_t phys;
        void *ptr;
 
-       phys = memblock_alloc(sz, sz);
+       phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        BUG_ON(!phys);
-       ptr = __va(phys);
-       memset(ptr, 0, sz);
-       return ptr;
+
+       /*
+        * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
+        * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
+        * any level of table.
+        */
+       ptr = pte_set_fixmap(phys);
+
+       memset(ptr, 0, PAGE_SIZE);
+
+       /*
+        * Implicit barriers also ensure the zeroed page is visible to the page
+        * table walker
+        */
+       pte_clear_fixmap();
+
+       return phys;
 }
 
 /*
@@ -95,24 +118,30 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pte_t *pte;
 
        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-               pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+               phys_addr_t pte_phys;
+               BUG_ON(!pgtable_alloc);
+               pte_phys = pgtable_alloc();
+               pte = pte_set_fixmap(pte_phys);
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
-               __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+               __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
                flush_tlb_all();
+               pte_clear_fixmap();
        }
        BUG_ON(pmd_bad(*pmd));
 
-       pte = pte_offset_kernel(pmd, addr);
+       pte = pte_set_fixmap_offset(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       pte_clear_fixmap();
 }
 
 static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -127,10 +156,29 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
        } while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
-static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
-                                 unsigned long addr, unsigned long end,
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+
+       /*
+        * If debug_pagealloc is enabled, we must map the linear map
+        * using pages. However, other mappings created by
+        * create_mapping_noalloc must use sections in some cases. Allow
+        * sections to be used in those cases, where no pgtable_alloc
+        * function is provided.
+        */
+       return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+#else
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+       return true;
+}
+#endif
+
+static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pmd_t *pmd;
        unsigned long next;
@@ -139,7 +187,10 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
-               pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+               phys_addr_t pmd_phys;
+               BUG_ON(!pgtable_alloc);
+               pmd_phys = pgtable_alloc();
+               pmd = pmd_set_fixmap(pmd_phys);
                if (pud_sect(*pud)) {
                        /*
                         * need to have the 1G of mappings continue to be
@@ -147,19 +198,20 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                         */
                        split_pud(pud, pmd);
                }
-               pud_populate(mm, pud, pmd);
+               __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
                flush_tlb_all();
+               pmd_clear_fixmap();
        }
        BUG_ON(pud_bad(*pud));
 
-       pmd = pmd_offset(pud, addr);
+       pmd = pmd_set_fixmap_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
-               if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+               if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+                     block_mappings_allowed(pgtable_alloc)) {
                        pmd_t old_pmd =*pmd;
-                       set_pmd(pmd, __pmd(phys |
-                                          pgprot_val(mk_sect_prot(prot))));
+                       pmd_set_huge(pmd, phys, prot);
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
@@ -167,17 +219,19 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
-                                       phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+                                       phys_addr_t table = pmd_page_paddr(old_pmd);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-                                      prot, alloc);
+                                      prot, pgtable_alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
+
+       pmd_clear_fixmap();
 }
 
 static inline bool use_1G_block(unsigned long addr, unsigned long next,
@@ -192,31 +246,32 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
        return true;
 }
 
-static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
-                                 unsigned long addr, unsigned long end,
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pud_t *pud;
        unsigned long next;
 
        if (pgd_none(*pgd)) {
-               pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
-               pgd_populate(mm, pgd, pud);
+               phys_addr_t pud_phys;
+               BUG_ON(!pgtable_alloc);
+               pud_phys = pgtable_alloc();
+               __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
        }
        BUG_ON(pgd_bad(*pgd));
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_set_fixmap_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
 
                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
-               if (use_1G_block(addr, next, phys)) {
+               if (use_1G_block(addr, next, phys) &&
+                   block_mappings_allowed(pgtable_alloc)) {
                        pud_t old_pud = *pud;
-                       set_pud(pud, __pud(phys |
-                                          pgprot_val(mk_sect_prot(prot))));
+                       pud_set_huge(pud, phys, prot);
 
                        /*
                         * If we have an old value for a pud, it will
@@ -228,51 +283,74 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
-                                       phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+                                       phys_addr_t table = pud_page_paddr(old_pud);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
-                       alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
+                       alloc_init_pmd(pud, addr, next, phys, prot,
+                                      pgtable_alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
+
+       pud_clear_fixmap();
 }
 
 /*
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void  __create_mapping(struct mm_struct *mm, pgd_t *pgd,
-                                   phys_addr_t phys, unsigned long virt,
+static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
-                                   void *(*alloc)(unsigned long size))
+                                   phys_addr_t (*pgtable_alloc)(void))
 {
        unsigned long addr, length, end, next;
 
+       /*
+        * If the virtual and physical address don't have the same offset
+        * within a page, we cannot map the region as the caller expects.
+        */
+       if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
+               return;
+
+       phys &= PAGE_MASK;
        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
 
        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
-               alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
+               alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
 }
 
-static void *late_alloc(unsigned long size)
+static phys_addr_t late_pgtable_alloc(void)
 {
-       void *ptr;
-
-       BUG_ON(size > PAGE_SIZE);
-       ptr = (void *)__get_free_page(PGALLOC_GFP);
+       void *ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);
-       return ptr;
+
+       /* Ensure the zeroed page is visible to the page table walker */
+       dsb(ishst);
+       return __pa(ptr);
+}
+
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+                                unsigned long virt, phys_addr_t size,
+                                pgprot_t prot,
+                                phys_addr_t (*alloc)(void))
+{
+       init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+/*
+ * This function can only be used to modify existing table entries,
+ * without allocating new levels of table. Note that this permits the
+ * creation of new section or page entries.
+ */
+static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
 {
        if (virt < VMALLOC_START) {
@@ -280,16 +358,16 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                        &phys, virt);
                return;
        }
-       __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
-                        size, prot, early_alloc);
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+                            NULL);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
 {
-       __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
-                               late_alloc);
+       __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
+                            late_pgtable_alloc);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -301,69 +379,57 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                return;
        }
 
-       return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
-                               phys, virt, size, prot, late_alloc);
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+                            late_pgtable_alloc);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
+       unsigned long kernel_start = __pa(_stext);
+       unsigned long kernel_end = __pa(_etext);
+
        /*
-        * Set up the executable regions using the existing section mappings
-        * for now. This will get more fine grained later once all memory
-        * is mapped
+        * Take care not to create a writable alias for the
+        * read-only text and rodata sections of the kernel image.
         */
-       unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
-
-       if (end < kernel_x_start) {
-               create_mapping(start, __phys_to_virt(start),
-                       end - start, PAGE_KERNEL);
-       } else if (start >= kernel_x_end) {
-               create_mapping(start, __phys_to_virt(start),
-                       end - start, PAGE_KERNEL);
-       } else {
-               if (start < kernel_x_start)
-                       create_mapping(start, __phys_to_virt(start),
-                               kernel_x_start - start,
-                               PAGE_KERNEL);
-               create_mapping(kernel_x_start,
-                               __phys_to_virt(kernel_x_start),
-                               kernel_x_end - kernel_x_start,
-                               PAGE_KERNEL_EXEC);
-               if (kernel_x_end < end)
-                       create_mapping(kernel_x_end,
-                               __phys_to_virt(kernel_x_end),
-                               end - kernel_x_end,
-                               PAGE_KERNEL);
+
+       /* No overlap with the kernel text */
+       if (end < kernel_start || start >= kernel_end) {
+               __create_pgd_mapping(pgd, start, __phys_to_virt(start),
+                                    end - start, PAGE_KERNEL,
+                                    early_pgtable_alloc);
+               return;
        }
 
+       /*
+        * This block overlaps the kernel text mapping.
+        * Map the portion(s) which don't overlap.
+        */
+       if (start < kernel_start)
+               __create_pgd_mapping(pgd, start,
+                                    __phys_to_virt(start),
+                                    kernel_start - start, PAGE_KERNEL,
+                                    early_pgtable_alloc);
+       if (kernel_end < end)
+               __create_pgd_mapping(pgd, kernel_end,
+                                    __phys_to_virt(kernel_end),
+                                    end - kernel_end, PAGE_KERNEL,
+                                    early_pgtable_alloc);
+
+       /*
+        * Map the linear alias of the [_stext, _etext) interval as
+        * read-only/non-executable. This makes the contents of the
+        * region accessible to subsystems such as hibernate, but
+        * protects it from inadvertent modification or execution.
+        */
+       __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
+                            kernel_end - kernel_start, PAGE_KERNEL_RO,
+                            early_pgtable_alloc);
 }
-#else
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
-{
-       create_mapping(start, __phys_to_virt(start), end - start,
-                       PAGE_KERNEL_EXEC);
-}
-#endif
 
-static void __init map_mem(void)
+static void __init map_mem(pgd_t *pgd)
 {
        struct memblock_region *reg;
-       phys_addr_t limit;
-
-       /*
-        * Temporarily limit the memblock range. We need to do this as
-        * create_mapping requires puds, pmds and ptes to be allocated from
-        * memory addressable from the initial direct kernel mapping.
-        *
-        * The initial direct kernel mapping, located at swapper_pg_dir, gives
-        * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps,
-        * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
-        * per Documentation/arm64/booting.txt).
-        */
-       limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
-       memblock_set_current_limit(limit);
 
        /* map all the memory banks */
        for_each_memblock(memory, reg) {
@@ -373,69 +439,87 @@ static void __init map_mem(void)
                if (start >= end)
                        break;
 
-               if (ARM64_SWAPPER_USES_SECTION_MAPS) {
-                       /*
-                        * For the first memory bank align the start address and
-                        * current memblock limit to prevent create_mapping() from
-                        * allocating pte page tables from unmapped memory. With
-                        * the section maps, if the first block doesn't end on section
-                        * size boundary, create_mapping() will try to allocate a pte
-                        * page, which may be returned from an unmapped area.
-                        * When section maps are not used, the pte page table for the
-                        * current limit is already present in swapper_pg_dir.
-                        */
-                       if (start < limit)
-                               start = ALIGN(start, SECTION_SIZE);
-                       if (end < limit) {
-                               limit = end & SECTION_MASK;
-                               memblock_set_current_limit(limit);
-                       }
-               }
-               __map_memblock(start, end);
+               __map_memblock(pgd, start, end);
        }
-
-       /* Limit no longer required. */
-       memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
-static void __init fixup_executable(void)
+void mark_rodata_ro(void)
 {
-#ifdef CONFIG_DEBUG_RODATA
-       /* now that we are actually fully mapped, make the start/end more fine grained */
-       if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
-               unsigned long aligned_start = round_down(__pa(_stext),
-                                                        SWAPPER_BLOCK_SIZE);
+       if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
+               return;
 
-               create_mapping(aligned_start, __phys_to_virt(aligned_start),
-                               __pa(_stext) - aligned_start,
-                               PAGE_KERNEL);
-       }
+       create_mapping_late(__pa(_stext), (unsigned long)_stext,
+                               (unsigned long)_etext - (unsigned long)_stext,
+                               PAGE_KERNEL_ROX);
+}
 
-       if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
-               unsigned long aligned_end = round_up(__pa(__init_end),
-                                                         SWAPPER_BLOCK_SIZE);
-               create_mapping(__pa(__init_end), (unsigned long)__init_end,
-                               aligned_end - __pa(__init_end),
-                               PAGE_KERNEL);
-       }
-#endif
+void fixup_init(void)
+{
+       /*
+        * Unmap the __init region but leave the VM area in place. This
+        * prevents the region from being reused for kernel modules, which
+        * is not supported by kallsyms.
+        */
+       unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
 }
 
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void)
+static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
+                                   pgprot_t prot, struct vm_struct *vma)
 {
-       create_mapping_late(__pa(_stext), (unsigned long)_stext,
-                               (unsigned long)_etext - (unsigned long)_stext,
-                               PAGE_KERNEL_ROX);
+       phys_addr_t pa_start = __pa(va_start);
+       unsigned long size = va_end - va_start;
+
+       BUG_ON(!PAGE_ALIGNED(pa_start));
+       BUG_ON(!PAGE_ALIGNED(size));
+
+       __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
+                            early_pgtable_alloc);
 
+       vma->addr       = va_start;
+       vma->phys_addr  = pa_start;
+       vma->size       = size;
+       vma->flags      = VM_MAP;
+       vma->caller     = __builtin_return_address(0);
+
+       vm_area_add_early(vma);
 }
-#endif
 
-void fixup_init(void)
+/*
+ * Create fine-grained mappings for the kernel.
+ */
+static void __init map_kernel(pgd_t *pgd)
 {
-       create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
-                       (unsigned long)__init_end - (unsigned long)__init_begin,
-                       PAGE_KERNEL);
+       static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;
+
+       map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+       map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
+                        &vmlinux_init);
+       map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+
+       if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+               /*
+                * The fixmap falls in a separate pgd to the kernel, and doesn't
+                * live in the carveout for the swapper_pg_dir. We can simply
+                * re-use the existing dir for the fixmap.
+                */
+               set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
+                       *pgd_offset_k(FIXADDR_START));
+       } else if (CONFIG_PGTABLE_LEVELS > 3) {
+               /*
+                * The fixmap shares its top level pgd entry with the kernel
+                * mapping. This can really only occur when we are running
+                * with 16k/4 levels, so we can simply reuse the pud level
+                * entry instead.
+                */
+               BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+               set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
+                       __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+               pud_clear_fixmap();
+       } else {
+               BUG();
+       }
+
+       kasan_copy_shadow(pgd);
 }
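
As a hedged aside, map_kernel_chunk() relies on the early VM-area registration pattern so the kernel segments later appear as ordinary vmap areas. A minimal sketch, with sketch_register_early_area() and its arguments purely illustrative:

	static struct vm_struct sketch_area;	/* static: no allocator this early */

	static void __init sketch_register_early_area(void *va, phys_addr_t pa,
						      unsigned long size)
	{
		sketch_area.addr      = va;
		sketch_area.phys_addr = pa;
		sketch_area.size      = size;
		sketch_area.flags     = VM_MAP;
		sketch_area.caller    = __builtin_return_address(0);

		/* Queue the area so the vmalloc core later adopts it. */
		vm_area_add_early(&sketch_area);
	}
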
 
 /*
@@ -444,28 +528,35 @@ void fixup_init(void)
  */
 void __init paging_init(void)
 {
-       void *zero_page;
+       phys_addr_t pgd_phys = early_pgtable_alloc();
+       pgd_t *pgd = pgd_set_fixmap(pgd_phys);
 
-       map_mem();
-       fixup_executable();
+       map_kernel(pgd);
+       map_mem(pgd);
 
-       /* allocate the zero page. */
-       zero_page = early_alloc(PAGE_SIZE);
-
-       bootmem_init();
-
-       empty_zero_page = virt_to_page(zero_page);
+       /*
+        * We want to reuse the original swapper_pg_dir so we don't have to
+        * communicate the new address to non-coherent secondaries in
+        * secondary_entry, and so cpu_switch_mm can generate the address with
+        * adrp+add rather than a load from some global variable.
+        *
+        * To do this we need to go via a temporary pgd.
+        */
+       cpu_replace_ttbr1(__va(pgd_phys));
+       memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
+       cpu_replace_ttbr1(swapper_pg_dir);
 
-       /* Ensure the zero page is visible to the page table walker */
-       dsb(ishst);
+       pgd_clear_fixmap();
+       memblock_free(pgd_phys, PAGE_SIZE);
 
        /*
-        * TTBR0 is only used for the identity mapping at this stage. Make it
-        * point to zero page to avoid speculatively fetching new entries.
+        * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
+        * allocated with it.
         */
-       cpu_set_reserved_ttbr0();
-       local_flush_tlb_all();
-       cpu_set_default_tcr_t0sz();
+       memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+                     SWAPPER_DIR_SIZE - PAGE_SIZE);
+
+       bootmem_init();
 }
 
 /*
@@ -552,21 +643,13 @@ void vmemmap_free(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_PGTABLE_LEVELS > 2
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_PGTABLE_LEVELS > 3
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
 static inline pud_t * fixmap_pud(unsigned long addr)
 {
        pgd_t *pgd = pgd_offset_k(addr);
 
        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
 
-       return pud_offset(pgd, addr);
+       return pud_offset_kimg(pgd, addr);
 }
 
 static inline pmd_t * fixmap_pmd(unsigned long addr)
@@ -575,16 +658,12 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
 
        BUG_ON(pud_none(*pud) || pud_bad(*pud));
 
-       return pmd_offset(pud, addr);
+       return pmd_offset_kimg(pud, addr);
 }
 
 static inline pte_t * fixmap_pte(unsigned long addr)
 {
-       pmd_t *pmd = fixmap_pmd(addr);
-
-       BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
-
-       return pte_offset_kernel(pmd, addr);
+       return &bm_pte[pte_index(addr)];
 }
 
 void __init early_fixmap_init(void)
@@ -595,15 +674,26 @@ void __init early_fixmap_init(void)
        unsigned long addr = FIXADDR_START;
 
        pgd = pgd_offset_k(addr);
-       pgd_populate(&init_mm, pgd, bm_pud);
-       pud = pud_offset(pgd, addr);
+       if (CONFIG_PGTABLE_LEVELS > 3 &&
+           !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
+               /*
+                * We only end up here if the kernel mapping and the fixmap
+                * share the top level pgd entry, which should only happen on
+                * 16k/4 levels configurations.
+                */
+               BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+               pud = pud_offset_kimg(pgd, addr);
+       } else {
+               pgd_populate(&init_mm, pgd, bm_pud);
+               pud = fixmap_pud(addr);
+       }
        pud_populate(&init_mm, pud, bm_pmd);
-       pmd = pmd_offset(pud, addr);
+       pmd = fixmap_pmd(addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);
 
        /*
         * The boot-ioremap range spans multiple pmds, for which
-        * we are not preparted:
+        * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
@@ -642,11 +732,10 @@ void __set_fixmap(enum fixed_addresses idx,
        }
 }
 
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 {
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
-       pgprot_t prot = PAGE_KERNEL_RO;
-       int size, offset;
+       int offset;
        void *dt_virt;
 
        /*
@@ -663,7 +752,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
-        * to call create_mapping() this early.
+        * to call create_mapping_noalloc() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
@@ -679,21 +768,73 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
        dt_virt = (void *)dt_virt_base + offset;
 
        /* map the first chunk so we can read the size from the header */
-       create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-                      SWAPPER_BLOCK_SIZE, prot);
+       create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+                       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
 
        if (fdt_check_header(dt_virt) != 0)
                return NULL;
 
-       size = fdt_totalsize(dt_virt);
-       if (size > MAX_FDT_SIZE)
+       *size = fdt_totalsize(dt_virt);
+       if (*size > MAX_FDT_SIZE)
                return NULL;
 
-       if (offset + size > SWAPPER_BLOCK_SIZE)
-               create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-                              round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
+       if (offset + *size > SWAPPER_BLOCK_SIZE)
+               create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+                              round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
 
-       memblock_reserve(dt_phys, size);
+       return dt_virt;
+}
 
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+       void *dt_virt;
+       int size;
+
+       dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+       if (!dt_virt)
+               return NULL;
+
+       memblock_reserve(dt_phys, size);
        return dt_virt;
 }
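
A hedged sketch of how an early-boot caller might use this wrapper; sketch_setup_machine_fdt() is hypothetical, while early_init_dt_scan() is the generic FDT scan entry point:

	static void __init sketch_setup_machine_fdt(phys_addr_t dt_phys)
	{
		void *dt_virt = fixmap_remap_fdt(dt_phys);

		/* early_init_dt_scan() validates the header and scans /chosen etc. */
		if (!dt_virt || !early_init_dt_scan(dt_virt))
			panic("Invalid device tree blob at physical address %pa\n",
			      &dt_phys);
	}
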
+
+int __init arch_ioremap_pud_supported(void)
+{
+       /* only 4k granule supports level 1 block mappings */
+       return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+       return 1;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+       BUG_ON(phys & ~PUD_MASK);
+       set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+       return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+       BUG_ON(phys & ~PMD_MASK);
+       set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+       return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+       if (!pud_sect(*pud))
+               return 0;
+       pud_clear(pud);
+       return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+       if (!pmd_sect(*pmd))
+               return 0;
+       pmd_clear(pmd);
+       return 1;
+}
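
The generic ioremap code is expected to probe these hooks before installing a block mapping. A simplified, non-authoritative sketch of that decision (sketch_try_pmd_block() is hypothetical):

	static bool sketch_try_pmd_block(pmd_t *pmd, unsigned long addr,
					 unsigned long end, phys_addr_t phys,
					 pgprot_t prot)
	{
		if (!arch_ioremap_pmd_supported())
			return false;
		if (end - addr < PMD_SIZE)
			return false;
		if (!IS_ALIGNED(addr, PMD_SIZE) || !IS_ALIGNED(phys, PMD_SIZE))
			return false;

		/* pmd_set_huge() installs a PMD_TYPE_SECT entry and returns 1. */
		return pmd_set_huge(pmd, phys, prot);
	}
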
index cf6240741134ecbeece606dade39dc90478a08b6..ca6d268e3313229b0941ce7d33439c7a4c861120 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -36,14 +37,32 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
        return 0;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+                               pgprot_t set_mask, pgprot_t clear_mask)
+{
+       struct page_change_data data;
+       int ret;
+
+       data.set_mask = set_mask;
+       data.clear_mask = clear_mask;
+
+       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                       &data);
+
+       flush_tlb_kernel_range(start, start + size);
+       return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
 {
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE*numpages;
        unsigned long end = start + size;
-       int ret;
-       struct page_change_data data;
+       struct vm_struct *area;
 
        if (!PAGE_ALIGNED(addr)) {
                start &= PAGE_MASK;
@@ -51,23 +70,29 @@ static int change_memory_common(unsigned long addr, int numpages,
                WARN_ON_ONCE(1);
        }
 
-       if (start < MODULES_VADDR || start >= MODULES_END)
-               return -EINVAL;
-
-       if (end < MODULES_VADDR || end >= MODULES_END)
+       /*
+        * Kernel VA mappings are always live, and splitting live section
+        * mappings into page mappings may cause TLB conflicts. This means
+        * we have to ensure that changing the permission bits of the range
+        * we are operating on does not result in such splitting.
+        *
+        * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+        * Those are guaranteed to consist entirely of page mappings, and
+        * splitting is never needed.
+        *
+        * So check whether the [addr, addr + size) interval is entirely
+        * covered by precisely one VM area that has the VM_ALLOC flag set.
+        */
+       area = find_vm_area((void *)addr);
+       if (!area ||
+           end > (unsigned long)area->addr + area->size ||
+           !(area->flags & VM_ALLOC))
                return -EINVAL;
 
        if (!numpages)
                return 0;
 
-       data.set_mask = set_mask;
-       data.clear_mask = clear_mask;
-
-       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-                                       &data);
-
-       flush_tlb_kernel_range(start, end);
-       return ret;
+       return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -99,3 +124,19 @@ int set_memory_x(unsigned long addr, int numpages)
                                        __pgprot(PTE_PXN));
 }
 EXPORT_SYMBOL_GPL(set_memory_x);
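
With the find_vm_area() check above, these helpers only accept vmalloc/vmap addresses. A hedged usage sketch (the buffer and page count are illustrative):

	static int sketch_protect_buffer(void)
	{
		int numpages = 4;			/* illustrative size */
		void *buf = vmalloc(numpages * PAGE_SIZE);

		if (!buf)
			return -ENOMEM;

		/* Allowed: vmalloc areas carry VM_ALLOC and are page-mapped. */
		return set_memory_ro((unsigned long)buf, numpages);
	}
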
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       unsigned long addr = (unsigned long) page_address(page);
+
+       if (enable)
+               __change_memory_common(addr, PAGE_SIZE * numpages,
+                                       __pgprot(PTE_VALID),
+                                       __pgprot(0));
+       else
+               __change_memory_common(addr, PAGE_SIZE * numpages,
+                                       __pgprot(0),
+                                       __pgprot(PTE_VALID));
+}
+#endif
index cb3ba1b812e74dcd1acbc167756d60da331d105f..ae11d4e03d0e68d7f0fe621f1c9d313fcab09127 100644 (file)
@@ -46,14 +46,14 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
                kmem_cache_free(pgd_cache, pgd);
 }
 
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
 {
+       if (PGD_SIZE == PAGE_SIZE)
+               return;
+
        /*
         * Naturally aligned pgds required by the architecture.
         */
-       if (PGD_SIZE != PAGE_SIZE)
-               pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
-                                             SLAB_PANIC, NULL);
-       return 0;
+       pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+                                     SLAB_PANIC, NULL);
 }
-core_initcall(pgd_cache_init);
index d69dffffaa8993bc7260c1df035b92c3b3867b48..984edcda1850f1be420f92be7666b36c687cc476 100644 (file)
        msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
 9000:
        .endm
+
+/*
+ * Macro to perform data cache maintenance for the interval
+ * [kaddr, kaddr + size)
+ *
+ *	op:		operation passed to dc instruction
+ *	domain:		domain used in dsb instruction
+ *     kaddr:          starting virtual address of the region
+ *     size:           size of the region
+ *     Corrupts:       kaddr, size, tmp1, tmp2
+ */
+       .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+       dcache_line_size \tmp1, \tmp2
+       add     \size, \kaddr, \size
+       sub     \tmp2, \tmp1, #1
+       bic     \kaddr, \kaddr, \tmp2
+9998:  dc      \op, \kaddr
+       add     \kaddr, \kaddr, \tmp1
+       cmp     \kaddr, \size
+       b.lo    9998b
+       dsb     \domain
+       .endm
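
In C terms, the macro aligns kaddr down to a cache line and walks up to kaddr + size. A minimal model, where do_dc_op() is a hypothetical stand-in for the dc instruction:

	static void sketch_dcache_by_line(unsigned long kaddr, unsigned long size,
					  unsigned long line,
					  void (*do_dc_op)(unsigned long))
	{
		unsigned long end = kaddr + size;	/* add \size, \kaddr, \size */

		kaddr &= ~(line - 1);			/* bic: align down to a line */
		for (; kaddr < end; kaddr += line)	/* cmp + b.lo loop */
			do_dc_op(kaddr);		/* dc \op, \kaddr */
		/* the assembly then issues dsb \domain */
	}
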
index b8f04b3f2786cc7ef29c1e47bf3bfec0f4bfc840..0c19534a901e616ecc5fe508ce205dc0de8fe0f4 100644 (file)
@@ -140,7 +140,33 @@ ENTRY(cpu_do_switch_mm)
        ret
 ENDPROC(cpu_do_switch_mm)
 
-       .section ".text.init", #alloc, #execinstr
+       .pushsection ".idmap.text", "ax"
+/*
+ * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ *
+ * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
+ * called by anything else. It can only be executed from a TTBR0 mapping.
+ */
+ENTRY(idmap_cpu_replace_ttbr1)
+       mrs     x2, daif
+       msr     daifset, #0xf
+
+       adrp    x1, empty_zero_page
+       msr     ttbr1_el1, x1
+       isb
+
+       tlbi    vmalle1
+       dsb     nsh
+       isb
+
+       msr     ttbr1_el1, x0
+       isb
+
+       msr     daif, x2
+
+       ret
+ENDPROC(idmap_cpu_replace_ttbr1)
+       .popsection
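
The C-side counterpart is expected to install the idmap, call this routine through its physical alias, then restore TTBR0. A hedged sketch modelled on the cpu_replace_ttbr1() helper, assuming cpu_install_idmap()/cpu_uninstall_idmap() are available:

	static inline void sketch_cpu_replace_ttbr1(pgd_t *pgd)
	{
		typedef void (ttbr_replace_func)(phys_addr_t);
		extern ttbr_replace_func idmap_cpu_replace_ttbr1;
		ttbr_replace_func *replace_phys;

		/* TTBR1 is torn down inside, so call through the physical alias. */
		replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);

		cpu_install_idmap();			/* point TTBR0 at the idmap */
		replace_phys(virt_to_phys(pgd));
		cpu_uninstall_idmap();
	}
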
 
 /*
  *     __cpu_setup
index 729f89163bc32113dba77e309c8ce767ed3d15e8..d2256fa97ea0c7875df5d21bcfff23bb6f25a0d9 100644 (file)
@@ -11,6 +11,7 @@ config PARISC
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
        select BUG
+       select BUILDTIME_EXTABLE_SORT
        select HAVE_PERF_EVENTS
        select GENERIC_ATOMIC64 if !64BIT
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
index b3069fd83468c5972f98d19f8f17dfa9bfb5e0cf..60e6f07b7e326bc4eb306105dcae477d5aa01f0e 100644 (file)
         */
 #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)      \
        .section __ex_table,"aw"                        !       \
-       ASM_ULONG_INSN  fault_addr, except_addr         !       \
+       .word (fault_addr - .), (except_addr - .)       !       \
        .previous
 
 
index 3d0e17bcc8e905ece06053ae15b3ff5443e6b033..df0f52bd18b457f9efa1a0b9fc470b0803f50632 100644 (file)
@@ -22,6 +22,9 @@
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
+/* Read-only memory is marked before mark_rodata_ro() is called. */
+#define __ro_after_init        __read_mostly
+
 void parisc_cache_init(void);  /* initializes cache-flushing */
 void disable_sr_hashing_asm(int); /* low level support for above */
 void disable_sr_hashing(void);   /* turns off space register hashing */
index 845272ce9cc587222e835131bb5ed96be0be03d4..7bd69bd43a018577d099f373346383900b1f0121 100644 (file)
@@ -121,10 +121,6 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
        }
 }
 
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
-#endif
-
 #include <asm/kmap_types.h>
 
 #define ARCH_HAS_KMAP
index 1960b87c1c8becb9796babc5c77e4452472b6e37..6f893d29f1b21625aadc4464d06ffba30b7697f3 100644 (file)
@@ -60,14 +60,15 @@ static inline long access_ok(int type, const void __user * addr,
  * use a 32bit (unsigned int) address here.
  */
 
+#define ARCH_HAS_RELATIVE_EXTABLE
 struct exception_table_entry {
-       unsigned long insn;     /* address of insn that is allowed to fault. */
-       unsigned long fixup;    /* fixup routine */
+       int insn;       /* relative address of insn that is allowed to fault. */
+       int fixup;      /* relative address of fixup routine */
 };
 
 #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
        ".section __ex_table,\"aw\"\n"                     \
-       ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
+       ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
        ".previous\n"
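
With relative entries, each 32-bit field stores an offset from its own location, so resolving back to an absolute address is a single addition. A hedged pair of helpers showing the arithmetic (the names are illustrative; the fixup_exception() hunk below performs the same computation inline):

	static inline unsigned long sketch_extable_insn(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->insn + x->insn;
	}

	static inline unsigned long sketch_extable_fixup(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->fixup + x->fixup;
	}
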
 
 /*
index f9064449908aad5ae01fbeb1c92e97399b0b828b..16dbe81c97c9005df3cbb91045ccb6ed20878930 100644 (file)
@@ -140,12 +140,6 @@ int fixup_exception(struct pt_regs *regs)
 {
        const struct exception_table_entry *fix;
 
-       /* If we only stored 32bit addresses in the exception table we can drop
-        * out if we faulted on a 64bit address. */
-       if ((sizeof(regs->iaoq[0]) > sizeof(fix->insn))
-               && (regs->iaoq[0] >> 32))
-                       return 0;
-
        fix = search_exception_tables(regs->iaoq[0]);
        if (fix) {
                struct exception_data *d;
@@ -155,7 +149,8 @@ int fixup_exception(struct pt_regs *regs)
                d->fault_space = regs->isr;
                d->fault_addr = regs->ior;
 
-               regs->iaoq[0] = ((fix->fixup) & ~3);
+               regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
+               regs->iaoq[0] &= ~3;
                /*
                 * NOTE: In some cases the faulting instruction
                 * may be in the delay slot of a branch. We
index 436639a316248532dee15c0ab527346c02448788..9d5e3a27bef202a9c8e38da85e6dcd1274cb1627 100644 (file)
@@ -289,6 +289,9 @@ config ARCH_SUPPORTS_UPROBES
 config FIX_EARLYCON_MEM
        def_bool y
 
+config DEBUG_RODATA
+       def_bool y
+
 config PGTABLE_LEVELS
        int
        default 4 if X86_64
index 137dfa96aa14e1c8c3a9c1d7b50b21d910cabae4..1f6c306a9a009bc442ed1c1677ac5b900edbcf66 100644 (file)
@@ -91,28 +91,16 @@ config EFI_PGT_DUMP
          issues with the mapping of the EFI runtime regions into that
          table.
 
-config DEBUG_RODATA
-       bool "Write protect kernel read-only data structures"
-       default y
-       depends on DEBUG_KERNEL
-       ---help---
-         Mark the kernel read-only data as write-protected in the pagetables,
-         in order to catch accidental (and incorrect) writes to such const
-         data. This is recommended so that we can catch kernel bugs sooner.
-         If in doubt, say "Y".
-
 config DEBUG_RODATA_TEST
-       bool "Testcase for the DEBUG_RODATA feature"
-       depends on DEBUG_RODATA
+       bool "Testcase for the marking rodata read-only"
        default y
        ---help---
-         This option enables a testcase for the DEBUG_RODATA
-         feature as well as for the change_page_attr() infrastructure.
+         This option enables a testcase for setting rodata read-only
+         as well as for the change_page_attr() infrastructure.
          If in doubt, say "N"
 
 config DEBUG_WX
        bool "Warn on W+X mappings at boot"
-       depends on DEBUG_RODATA
        select X86_PTDUMP_CORE
        ---help---
          Generate a warning if any W+X mappings are found at boot.
index 0224987556ce80bd606063b56ef124b0857a3f44..3f69326ed545719aedd9e11b1be0fdea104136bc 100644 (file)
@@ -140,7 +140,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
        fprintf(outfile, "#include <asm/vdso.h>\n");
        fprintf(outfile, "\n");
        fprintf(outfile,
-               "static unsigned char raw_data[%lu] __page_aligned_data = {",
+               "static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {",
                mapping_size);
        for (j = 0; j < stripped_len; j++) {
                if (j % 10 == 0)
index e63aa38e85fb23375ecefa40efe8cd9d76da6c43..61518cf79437679788bd41a1ee1c942aea29a8cb 100644 (file)
@@ -91,16 +91,10 @@ void clflush_cache_range(void *addr, unsigned int size);
 
 #define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
 
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
 extern const int rodata_test_data;
 extern int kernel_set_to_readonly;
 void set_kernel_text_rw(void);
 void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
 
 #ifdef CONFIG_DEBUG_RODATA_TEST
 int rodata_test(void);
index 0010c78c4998cf0702299ea2f8a9229e09bb6438..08b1f2f6ea50c186933d0d9e79c82da0b9da4f3f 100644 (file)
@@ -25,6 +25,8 @@
 #define EFI32_LOADER_SIGNATURE "EL32"
 #define EFI64_LOADER_SIGNATURE "EL64"
 
+#define MAX_CMDLINE_ADDRESS    UINT_MAX
+
 #ifdef CONFIG_X86_32
 
 
index c1adf33fdd0d6f70f055b9a056bc7787bda7635e..bc62e7cbf1b1f883fc9acb0a145305384a3bf062 100644 (file)
@@ -17,15 +17,8 @@ static inline bool kvm_check_and_clear_guest_paused(void)
 }
 #endif /* CONFIG_KVM_GUEST */
 
-#ifdef CONFIG_DEBUG_RODATA
 #define KVM_HYPERCALL \
         ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
-#else
-/* On AMD processors, vmcall will generate a trap that we will
- * then rewrite to the appropriate instruction.
- */
-#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
-#endif
 
 /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
  * instruction.  The hypervisor may replace it with something else but only the
index 0a5242428659045cfb439d3045593cc1c63aad96..13b6cdd0af57049468e47ef884e6eccba49dca0e 100644 (file)
@@ -7,7 +7,7 @@
 extern char __brk_base[], __brk_limit[];
 extern struct exception_table_entry __stop___ex_table[];
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+#if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
 #endif
 
index 311bcf338f07e75a48115ef4edf3df5f748b6940..eb6bd34582c692f69eb48fc50c44a9b502becd4e 100644 (file)
@@ -81,9 +81,9 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 static unsigned long text_ip_addr(unsigned long ip)
 {
        /*
-        * On x86_64, kernel text mappings are mapped read-only with
-        * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
-        * of the kernel text mapping to modify the kernel text.
+        * On x86_64, kernel text mappings are mapped read-only, so we use
+        * the kernel identity mapping instead of the kernel text mapping
+        * to modify the kernel text.
         *
         * For 32bit kernels, these mappings are same and we can use
         * kernel identity mapping to modify code.
index 44256a62702b2c51077fc0b8b82a904ed122b9f6..ed15cd486d06347626c080a0081e3eed01ac9128 100644 (file)
@@ -750,9 +750,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
        int err;
-#ifdef CONFIG_DEBUG_RODATA
        char opc[BREAK_INSTR_SIZE];
-#endif /* CONFIG_DEBUG_RODATA */
 
        bpt->type = BP_BREAKPOINT;
        err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
@@ -761,7 +759,6 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
                return err;
        err = probe_kernel_write((char *)bpt->bpt_addr,
                                 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
-#ifdef CONFIG_DEBUG_RODATA
        if (!err)
                return err;
        /*
@@ -778,13 +775,12 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
        if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
                return -EINVAL;
        bpt->type = BP_POKE_BREAKPOINT;
-#endif /* CONFIG_DEBUG_RODATA */
+
        return err;
 }
 
 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-#ifdef CONFIG_DEBUG_RODATA
        int err;
        char opc[BREAK_INSTR_SIZE];
 
@@ -801,8 +797,8 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
        if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
                goto knl_write;
        return err;
+
 knl_write:
-#endif /* CONFIG_DEBUG_RODATA */
        return probe_kernel_write((char *)bpt->bpt_addr,
                                  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
index 3f92ce07e525fd41167a64f9d7c4df1bfbe673bf..27538f183c3b15d9d59e225f2612b12660ee979b 100644 (file)
@@ -142,7 +142,6 @@ static int test_NX(void)
         * by the error message
         */
 
-#ifdef CONFIG_DEBUG_RODATA
        /* Test 3: Check if the .rodata section is executable */
        if (rodata_test_data != 0xC3) {
                printk(KERN_ERR "test_nx: .rodata marker has invalid value\n");
@@ -151,7 +150,6 @@ static int test_NX(void)
                printk(KERN_ERR "test_nx: .rodata section is executable\n");
                ret = -ENODEV;
        }
-#endif
 
 #if 0
        /* Test 4: Check if the .data section of a module is executable */
index 5ecbfe5099dad68e140b85ffdaefc75818afc6d9..cb4a01b41e277887b6769e4a17d60cb4139f9864 100644 (file)
@@ -76,5 +76,5 @@ int rodata_test(void)
 }
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Testcase for the DEBUG_RODATA infrastructure");
+MODULE_DESCRIPTION("Testcase for marking rodata as read-only");
 MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
index 74e4bf11f562e0354c227518421e2375ec16fafa..fe133b710befa9a7495178374076e465ec61668f 100644 (file)
@@ -41,29 +41,28 @@ ENTRY(phys_startup_64)
 jiffies_64 = jiffies;
 #endif
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+#if defined(CONFIG_X86_64)
 /*
- * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA
- * we retain large page mappings for boundaries spanning kernel text, rodata
- * and data sections.
+ * On 64-bit, align RODATA to 2MB so we retain large page mappings for
+ * boundaries spanning kernel text, rodata and data sections.
  *
  * However, kernel identity mappings will have different RWX permissions
  * to the pages mapping to text and to the pages padding (which are freed) the
  * text section. Hence kernel identity mappings will be broken to smaller
  * pages. For 64-bit, kernel text and kernel identity mappings are different,
- * so we can enable protection checks that come with CONFIG_DEBUG_RODATA,
- * as well as retain 2MB large page mappings for kernel text.
+ * so we can enable protection checks as well as retain 2MB large page
+ * mappings for kernel text.
  */
-#define X64_ALIGN_DEBUG_RODATA_BEGIN   . = ALIGN(HPAGE_SIZE);
+#define X64_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
 
-#define X64_ALIGN_DEBUG_RODATA_END                             \
+#define X64_ALIGN_RODATA_END                                   \
                . = ALIGN(HPAGE_SIZE);                          \
                __end_rodata_hpage_align = .;
 
 #else
 
-#define X64_ALIGN_DEBUG_RODATA_BEGIN
-#define X64_ALIGN_DEBUG_RODATA_END
+#define X64_ALIGN_RODATA_BEGIN
+#define X64_ALIGN_RODATA_END
 
 #endif
 
@@ -112,13 +111,11 @@ SECTIONS
 
        EXCEPTION_TABLE(16) :text = 0x9090
 
-#if defined(CONFIG_DEBUG_RODATA)
        /* .text should occupy whole number of pages */
        . = ALIGN(PAGE_SIZE);
-#endif
-       X64_ALIGN_DEBUG_RODATA_BEGIN
+       X64_ALIGN_RODATA_BEGIN
        RO_DATA(PAGE_SIZE)
-       X64_ALIGN_DEBUG_RODATA_END
+       X64_ALIGN_RODATA_END
 
        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
index cb4ef3de61f9ae9c95249876965a71a5d7b58cf8..2ebfbaf611424be1937c4e2288776d50d1c8ff9e 100644 (file)
@@ -871,7 +871,6 @@ static noinline int do_test_wp_bit(void)
        return flag;
 }
 
-#ifdef CONFIG_DEBUG_RODATA
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
 
@@ -960,5 +959,3 @@ void mark_rodata_ro(void)
        if (__supported_pte_mask & _PAGE_NX)
                debug_checkwx();
 }
-#endif
-
index ec081fe0ce2c10246fec7126dd213aa6b4d75dbb..e08d141844ee81897dd80a76b4ca240b6270b680 100644 (file)
@@ -1062,7 +1062,6 @@ void __init mem_init(void)
        mem_init_print_info(NULL);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
 
@@ -1154,8 +1153,6 @@ void mark_rodata_ro(void)
        debug_checkwx();
 }
 
-#endif
-
 int kern_addr_valid(unsigned long addr)
 {
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
index b599a780a5a915fb5d1a5b03b1ba63489ab13a19..4540e8880cd925cd3bd933426a1d4fb43d7e5176 100644 (file)
@@ -278,7 +278,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+#if defined(CONFIG_X86_64)
        /*
         * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
         * kernel text mappings for the large page aligned text, rodata sections
index 33c1e18c41a4d467259fb438911418372b7e82a4..19837ef04d8ef21a355e22171117398b2f966f80 100644 (file)
@@ -1,2 +1,3 @@
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 obj-y                          += core.o cpu.o
+obj-$(CONFIG_DEBUG_FS)         += debugfs.o
index f8580900c2739d9c4389c6aa961a8dd2cdb5306c..433b60092972d56abba55897158d6c22156cf631 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/of.h>
 #include <linux/export.h>
+#include <linux/regulator/consumer.h>
 
 #include "opp.h"
 
 /*
- * The root of the list of all devices. All device_opp structures branch off
- * from here, with each device_opp containing the list of opp it supports in
+ * The root of the list of all opp-tables. All opp_table structures branch off
+ * from here, with each opp_table containing the list of opps it supports in
  * various states of availability.
  */
-static LIST_HEAD(dev_opp_list);
+static LIST_HEAD(opp_tables);
 /* Lock to allow exclusive modification to the device and opp lists */
-DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(opp_table_lock);
 
 #define opp_rcu_lockdep_assert()                                       \
 do {                                                                   \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
-                               !lockdep_is_held(&dev_opp_list_lock),   \
-                          "Missing rcu_read_lock() or "                \
-                          "dev_opp_list_lock protection");             \
+                        !lockdep_is_held(&opp_table_lock),             \
+                        "Missing rcu_read_lock() or "                  \
+                        "opp_table_lock protection");                  \
 } while (0)
 
-static struct device_list_opp *_find_list_dev(const struct device *dev,
-                                             struct device_opp *dev_opp)
+static struct opp_device *_find_opp_dev(const struct device *dev,
+                                       struct opp_table *opp_table)
 {
-       struct device_list_opp *list_dev;
+       struct opp_device *opp_dev;
 
-       list_for_each_entry(list_dev, &dev_opp->dev_list, node)
-               if (list_dev->dev == dev)
-                       return list_dev;
+       list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+               if (opp_dev->dev == dev)
+                       return opp_dev;
 
        return NULL;
 }
 
-static struct device_opp *_managed_opp(const struct device_node *np)
+static struct opp_table *_managed_opp(const struct device_node *np)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
 
-       list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
-               if (dev_opp->np == np) {
+       list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+               if (opp_table->np == np) {
                        /*
                         * Multiple devices can point to the same OPP table and
                         * so will have same node-pointer, np.
@@ -64,7 +66,7 @@ static struct device_opp *_managed_opp(const struct device_node *np)
                         * But the OPPs will be considered as shared only if the
                         * OPP table contains a "opp-shared" property.
                         */
-                       return dev_opp->shared_opp ? dev_opp : NULL;
+                       return opp_table->shared_opp ? opp_table : NULL;
                }
        }
 
@@ -72,24 +74,24 @@ static struct device_opp *_managed_opp(const struct device_node *np)
 }
 
 /**
- * _find_device_opp() - find device_opp struct using device pointer
- * @dev:       device pointer used to lookup device OPPs
+ * _find_opp_table() - find opp_table struct using device pointer
+ * @dev:       device pointer used to lookup OPP table
  *
- * Search list of device OPPs for one containing matching device. Does a RCU
- * reader operation to grab the pointer needed.
+ * Search OPP table for one containing matching device. Does a RCU reader
+ * operation to grab the pointer needed.
  *
- * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  * -EINVAL based on type of error.
  *
  * Locking: For readers, this function must be called under rcu_read_lock().
- * device_opp is a RCU protected pointer, which means that device_opp is valid
+ * opp_table is a RCU protected pointer, which means that opp_table is valid
  * as long as we are under RCU lock.
  *
- * For Writers, this function must be called with dev_opp_list_lock held.
+ * For Writers, this function must be called with opp_table_lock held.
  */
-struct device_opp *_find_device_opp(struct device *dev)
+struct opp_table *_find_opp_table(struct device *dev)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
 
        opp_rcu_lockdep_assert();
 
@@ -98,9 +100,9 @@ struct device_opp *_find_device_opp(struct device *dev)
                return ERR_PTR(-EINVAL);
        }
 
-       list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
-               if (_find_list_dev(dev, dev_opp))
-                       return dev_opp;
+       list_for_each_entry_rcu(opp_table, &opp_tables, node)
+               if (_find_opp_dev(dev, opp_table))
+                       return opp_table;
 
        return ERR_PTR(-ENODEV);
 }
@@ -213,22 +215,98 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  */
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        unsigned long clock_latency_ns;
 
        rcu_read_lock();
 
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp))
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table))
                clock_latency_ns = 0;
        else
-               clock_latency_ns = dev_opp->clock_latency_ns_max;
+               clock_latency_ns = opp_table->clock_latency_ns_max;
 
        rcu_read_unlock();
        return clock_latency_ns;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 
+/**
+ * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max voltage latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+       struct opp_table *opp_table;
+       struct dev_pm_opp *opp;
+       struct regulator *reg;
+       unsigned long latency_ns = 0;
+       unsigned long min_uV = ~0, max_uV = 0;
+       int ret;
+
+       rcu_read_lock();
+
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               rcu_read_unlock();
+               return 0;
+       }
+
+       reg = opp_table->regulator;
+       if (IS_ERR(reg)) {
+               /* Regulator may not be required for device */
+               if (reg)
+                       dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
+                               PTR_ERR(reg));
+               rcu_read_unlock();
+               return 0;
+       }
+
+       list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+               if (!opp->available)
+                       continue;
+
+               if (opp->u_volt_min < min_uV)
+                       min_uV = opp->u_volt_min;
+               if (opp->u_volt_max > max_uV)
+                       max_uV = opp->u_volt_max;
+       }
+
+       rcu_read_unlock();
+
+       /*
+        * The caller needs to ensure that opp_table (and hence the regulator)
+        * isn't freed while we are executing this routine.
+        */
+       ret = regulator_set_voltage_time(reg, min_uV, max_uV);
+       if (ret > 0)
+               latency_ns = ret * 1000;
+
+       return latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
+
+/**
+ * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
+ *                                          nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max transition latency, in nanoseconds, to
+ * switch from one OPP to another.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+       return dev_pm_opp_get_max_volt_latency(dev) +
+               dev_pm_opp_get_max_clock_latency(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
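
A hedged sketch of a typical consumer, e.g. a cpufreq driver advertising its worst-case switch time (sketch_init_latency() and its arguments are illustrative):

	static void sketch_init_latency(struct device *cpu_dev,
					struct cpufreq_policy *policy)
	{
		/* Both sides are in nanoseconds. */
		policy->cpuinfo.transition_latency =
			dev_pm_opp_get_max_transition_latency(cpu_dev);
	}
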
+
 /**
  * dev_pm_opp_get_suspend_opp() - Get suspend opp
  * @dev:       device for which we do this operation
@@ -244,21 +322,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  */
 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
 
        opp_rcu_lockdep_assert();
 
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
-           !dev_opp->suspend_opp->available)
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
+           !opp_table->suspend_opp->available)
                return NULL;
 
-       return dev_opp->suspend_opp;
+       return opp_table->suspend_opp;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
 
 /**
- * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  * @dev:       device for which we do this operation
  *
  * Return: This function returns the number of available opps if there are any,
@@ -268,21 +346,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
  */
 int dev_pm_opp_get_opp_count(struct device *dev)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct dev_pm_opp *temp_opp;
        int count = 0;
 
        rcu_read_lock();
 
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp)) {
-               count = PTR_ERR(dev_opp);
-               dev_err(dev, "%s: device OPP not found (%d)\n",
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               count = PTR_ERR(opp_table);
+               dev_err(dev, "%s: OPP table not found (%d)\n",
                        __func__, count);
                goto out_unlock;
        }
 
-       list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+       list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
                if (temp_opp->available)
                        count++;
        }
@@ -299,7 +377,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  * @freq:              frequency to search for
  * @available:         true/false - match for available opp
  *
- * Return: Searches for exact match in the opp list and returns pointer to the
+ * Return: Searches for exact match in the opp table and returns pointer to the
  * matching opp if found, else returns ERR_PTR in case of error and should
  * be handled using IS_ERR. Error return values can be:
  * EINVAL:     for bad pointer
@@ -323,19 +401,20 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
                                              unsigned long freq,
                                              bool available)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        opp_rcu_lockdep_assert();
 
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp)) {
-               int r = PTR_ERR(dev_opp);
-               dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               int r = PTR_ERR(opp_table);
+
+               dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
                return ERR_PTR(r);
        }
 
-       list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+       list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
                if (temp_opp->available == available &&
                                temp_opp->rate == freq) {
                        opp = temp_opp;
@@ -371,7 +450,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                                             unsigned long *freq)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        opp_rcu_lockdep_assert();
@@ -381,11 +460,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                return ERR_PTR(-EINVAL);
        }
 
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp))
-               return ERR_CAST(dev_opp);
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table))
+               return ERR_CAST(opp_table);
 
-       list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+       list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
                if (temp_opp->available && temp_opp->rate >= *freq) {
                        opp = temp_opp;
                        *freq = opp->rate;
@@ -421,7 +500,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                                              unsigned long *freq)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        opp_rcu_lockdep_assert();
@@ -431,11 +510,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                return ERR_PTR(-EINVAL);
        }
 
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp))
-               return ERR_CAST(dev_opp);
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table))
+               return ERR_CAST(opp_table);
 
-       list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+       list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
                if (temp_opp->available) {
                        /* go to the next node, before choosing prev */
                        if (temp_opp->rate > *freq)
@@ -451,116 +530,343 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
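
Per the locking rules in the kernel-doc above, these lookups must run under rcu_read_lock(). A minimal, non-authoritative sketch that fetches the lowest available frequency:

	static unsigned long sketch_min_rate(struct device *dev)
	{
		unsigned long freq = 0;			/* in: floor, out: match */
		struct dev_pm_opp *opp;

		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		rcu_read_unlock();

		/* opp itself is only valid under RCU; we keep just the frequency. */
		return IS_ERR(opp) ? 0 : freq;
	}
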
 
-/* List-dev Helpers */
-static void _kfree_list_dev_rcu(struct rcu_head *head)
+/*
+ * The caller needs to ensure that opp_table (and hence the clk) isn't
+ * freed while the clk returned here is in use.
+ */
+static struct clk *_get_opp_clk(struct device *dev)
 {
-       struct device_list_opp *list_dev;
+       struct opp_table *opp_table;
+       struct clk *clk;
+
+       rcu_read_lock();
 
-       list_dev = container_of(head, struct device_list_opp, rcu_head);
-       kfree_rcu(list_dev, rcu_head);
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+               clk = ERR_CAST(opp_table);
+               goto unlock;
+       }
+
+       clk = opp_table->clk;
+       if (IS_ERR(clk))
+               dev_err(dev, "%s: No clock available for the device\n",
+                       __func__);
+
+unlock:
+       rcu_read_unlock();
+       return clk;
 }
 
-static void _remove_list_dev(struct device_list_opp *list_dev,
-                            struct device_opp *dev_opp)
+static int _set_opp_voltage(struct device *dev, struct regulator *reg,
+                           unsigned long u_volt, unsigned long u_volt_min,
+                           unsigned long u_volt_max)
 {
-       list_del(&list_dev->node);
-       call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
-                 _kfree_list_dev_rcu);
+       int ret;
+
+       /* Regulator not available for device */
+       if (IS_ERR(reg)) {
+               dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
+                       PTR_ERR(reg));
+               return 0;
+       }
+
+       dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
+               u_volt, u_volt_max);
+
+       ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
+                                           u_volt_max);
+       if (ret)
+               dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
+                       __func__, u_volt_min, u_volt, u_volt_max, ret);
+
+       return ret;
 }
 
-struct device_list_opp *_add_list_dev(const struct device *dev,
-                                     struct device_opp *dev_opp)
+/**
+ * dev_pm_opp_set_rate() - Configure new OPP based on frequency
+ * @dev:        device for which we do this operation
+ * @target_freq: frequency to achieve
+ *
+ * This configures the power-supplies and clock source to the levels specified
+ * by the OPP corresponding to the target_freq.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
-       struct device_list_opp *list_dev;
+       struct opp_table *opp_table;
+       struct dev_pm_opp *old_opp, *opp;
+       struct regulator *reg;
+       struct clk *clk;
+       unsigned long freq, old_freq;
+       unsigned long u_volt, u_volt_min, u_volt_max;
+       unsigned long ou_volt, ou_volt_min, ou_volt_max;
+       int ret;
+
+       if (unlikely(!target_freq)) {
+               dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
+                       target_freq);
+               return -EINVAL;
+       }
+
+       clk = _get_opp_clk(dev);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       freq = clk_round_rate(clk, target_freq);
+       if ((long)freq <= 0)
+               freq = target_freq;
+
+       old_freq = clk_get_rate(clk);
+
+       /* Return early if nothing to do */
+       if (old_freq == freq) {
+               dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+                       __func__, freq);
+               return 0;
+       }
+
+       rcu_read_lock();
+
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+               rcu_read_unlock();
+               return PTR_ERR(opp_table);
+       }
+
+       old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+       if (!IS_ERR(old_opp)) {
+               ou_volt = old_opp->u_volt;
+               ou_volt_min = old_opp->u_volt_min;
+               ou_volt_max = old_opp->u_volt_max;
+       } else {
+               dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
+                       __func__, old_freq, PTR_ERR(old_opp));
+       }
+
+       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+       if (IS_ERR(opp)) {
+               ret = PTR_ERR(opp);
+               dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+                       __func__, freq, ret);
+               rcu_read_unlock();
+               return ret;
+       }
 
-       list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
-       if (!list_dev)
+       u_volt = opp->u_volt;
+       u_volt_min = opp->u_volt_min;
+       u_volt_max = opp->u_volt_max;
+
+       reg = opp_table->regulator;
+
+       rcu_read_unlock();
+
+       /* Scaling up? Scale voltage before frequency */
+       if (freq > old_freq) {
+               ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+                                      u_volt_max);
+               if (ret)
+                       goto restore_voltage;
+       }
+
+       /* Change frequency */
+
+       dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+               __func__, old_freq, freq);
+
+       ret = clk_set_rate(clk, freq);
+       if (ret) {
+               dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+                       ret);
+               goto restore_voltage;
+       }
+
+       /* Scaling down? Scale voltage after frequency */
+       if (freq < old_freq) {
+               ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+                                      u_volt_max);
+               if (ret)
+                       goto restore_freq;
+       }
+
+       return 0;
+
+restore_freq:
+       if (clk_set_rate(clk, old_freq))
+               dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+                       __func__, old_freq);
+restore_voltage:
+       /* This shouldn't harm even if the voltages weren't updated earlier */
+       if (!IS_ERR(old_opp))
+               _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
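
A hedged usage sketch from a hypothetical scaling path; the kHz-to-Hz conversion is the usual pitfall, since cpufreq tables are in kHz while dev_pm_opp_set_rate() expects Hz:

	static int sketch_scale(struct device *cpu_dev, unsigned int target_khz)
	{
		return dev_pm_opp_set_rate(cpu_dev, target_khz * 1000UL);
	}
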
+
+/* OPP-dev Helpers */
+static void _kfree_opp_dev_rcu(struct rcu_head *head)
+{
+       struct opp_device *opp_dev;
+
+       opp_dev = container_of(head, struct opp_device, rcu_head);
+       kfree_rcu(opp_dev, rcu_head);
+}
+
+static void _remove_opp_dev(struct opp_device *opp_dev,
+                           struct opp_table *opp_table)
+{
+       opp_debug_unregister(opp_dev, opp_table);
+       list_del(&opp_dev->node);
+       call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
+                 _kfree_opp_dev_rcu);
+}
+
+struct opp_device *_add_opp_dev(const struct device *dev,
+                               struct opp_table *opp_table)
+{
+       struct opp_device *opp_dev;
+       int ret;
+
+       opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
+       if (!opp_dev)
                return NULL;
 
-       /* Initialize list-dev */
-       list_dev->dev = dev;
-       list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+       /* Initialize opp-dev */
+       opp_dev->dev = dev;
+       list_add_rcu(&opp_dev->node, &opp_table->dev_list);
+
+       /* Create debugfs entries for the opp_table */
+       ret = opp_debug_register(opp_dev, opp_table);
+       if (ret)
+               dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
+                       __func__, ret);
 
-       return list_dev;
+       return opp_dev;
 }
 
 /**
- * _add_device_opp() - Find device OPP table or allocate a new one
+ * _add_opp_table() - Find OPP table or allocate a new one
  * @dev:       device for which we do this operation
  *
  * It tries to find an existing table first, if it couldn't find one, it
  * allocates a new OPP table and returns that.
  *
- * Return: valid device_opp pointer if success, else NULL.
+ * Return: valid opp_table pointer if success, else NULL.
  */
-static struct device_opp *_add_device_opp(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev)
 {
-       struct device_opp *dev_opp;
-       struct device_list_opp *list_dev;
+       struct opp_table *opp_table;
+       struct opp_device *opp_dev;
+       struct device_node *np;
+       int ret;
 
-       /* Check for existing list for 'dev' first */
-       dev_opp = _find_device_opp(dev);
-       if (!IS_ERR(dev_opp))
-               return dev_opp;
+       /* Check for existing table for 'dev' first */
+       opp_table = _find_opp_table(dev);
+       if (!IS_ERR(opp_table))
+               return opp_table;
 
        /*
-        * Allocate a new device OPP table. In the infrequent case where a new
+        * Allocate a new OPP table. In the infrequent case where a new
         * device needs to be added, we pay this penalty.
         */
-       dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
-       if (!dev_opp)
+       opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
+       if (!opp_table)
                return NULL;
 
-       INIT_LIST_HEAD(&dev_opp->dev_list);
+       INIT_LIST_HEAD(&opp_table->dev_list);
 
-       list_dev = _add_list_dev(dev, dev_opp);
-       if (!list_dev) {
-               kfree(dev_opp);
+       opp_dev = _add_opp_dev(dev, opp_table);
+       if (!opp_dev) {
+               kfree(opp_table);
                return NULL;
        }
 
-       srcu_init_notifier_head(&dev_opp->srcu_head);
-       INIT_LIST_HEAD(&dev_opp->opp_list);
+       /*
+        * Only required for backward compatibility with v1 bindings, but isn't
+        * harmful for other cases, so we do it unconditionally.
+        */
+       np = of_node_get(dev->of_node);
+       if (np) {
+               u32 val;
+
+               if (!of_property_read_u32(np, "clock-latency", &val))
+                       opp_table->clock_latency_ns_max = val;
+               of_property_read_u32(np, "voltage-tolerance",
+                                    &opp_table->voltage_tolerance_v1);
+               of_node_put(np);
+       }
+
+       /* Set regulator to a non-NULL error value */
+       opp_table->regulator = ERR_PTR(-ENXIO);
+
+       /* Find clk for the device */
+       opp_table->clk = clk_get(dev, NULL);
+       if (IS_ERR(opp_table->clk)) {
+               ret = PTR_ERR(opp_table->clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
+                               ret);
+       }
 
-       /* Secure the device list modification */
-       list_add_rcu(&dev_opp->node, &dev_opp_list);
-       return dev_opp;
+       srcu_init_notifier_head(&opp_table->srcu_head);
+       INIT_LIST_HEAD(&opp_table->opp_list);
+
+       /* Secure the device table modification */
+       list_add_rcu(&opp_table->node, &opp_tables);
+       return opp_table;
 }
 
 /**
- * _kfree_device_rcu() - Free device_opp RCU handler
+ * _kfree_device_rcu() - Free opp_table RCU handler
  * @head:      RCU head
  */
 static void _kfree_device_rcu(struct rcu_head *head)
 {
-       struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+       struct opp_table *opp_table = container_of(head, struct opp_table,
+                                                  rcu_head);
 
-       kfree_rcu(device_opp, rcu_head);
+       kfree_rcu(opp_table, rcu_head);
 }
 
 /**
- * _remove_device_opp() - Removes a device OPP table
- * @dev_opp: device OPP table to be removed.
+ * _remove_opp_table() - Removes an OPP table
+ * @opp_table: OPP table to be removed.
  *
- * Removes/frees device OPP table it it doesn't contain any OPPs.
+ * Removes/frees OPP table if it doesn't contain any OPPs.
  */
-static void _remove_device_opp(struct device_opp *dev_opp)
+static void _remove_opp_table(struct opp_table *opp_table)
 {
-       struct device_list_opp *list_dev;
+       struct opp_device *opp_dev;
+
+       if (!list_empty(&opp_table->opp_list))
+               return;
+
+       if (opp_table->supported_hw)
+               return;
 
-       if (!list_empty(&dev_opp->opp_list))
+       if (opp_table->prop_name)
                return;
 
-       list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
-                                   node);
+       if (!IS_ERR(opp_table->regulator))
+               return;
+
+       /* Release clk */
+       if (!IS_ERR(opp_table->clk))
+               clk_put(opp_table->clk);
+
+       opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
+                                  node);
 
-       _remove_list_dev(list_dev, dev_opp);
+       _remove_opp_dev(opp_dev, opp_table);
 
        /* dev_list must be empty now */
-       WARN_ON(!list_empty(&dev_opp->dev_list));
+       WARN_ON(!list_empty(&opp_table->dev_list));
 
-       list_del_rcu(&dev_opp->node);
-       call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+       list_del_rcu(&opp_table->node);
+       call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
                  _kfree_device_rcu);
 }
 
@@ -577,17 +883,17 @@ static void _kfree_opp_rcu(struct rcu_head *head)
 
 /**
  * _opp_remove()  - Remove an OPP from a table definition
- * @dev_opp:   points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
  * @opp:       pointer to the OPP to remove
  * @notify:    OPP_EVENT_REMOVE notification should be sent or not
  *
- * This function removes an opp definition from the opp list.
+ * This function removes an opp definition from the opp table.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * It is assumed that the caller holds required mutex for an RCU updater
  * strategy.
  */
-static void _opp_remove(struct device_opp *dev_opp,
+static void _opp_remove(struct opp_table *opp_table,
                        struct dev_pm_opp *opp, bool notify)
 {
        /*
@@ -595,21 +901,23 @@ static void _opp_remove(struct device_opp *dev_opp,
         * frequency/voltage list.
         */
        if (notify)
-               srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+               srcu_notifier_call_chain(&opp_table->srcu_head,
+                                        OPP_EVENT_REMOVE, opp);
+       opp_debug_remove_one(opp);
        list_del_rcu(&opp->node);
-       call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+       call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 
-       _remove_device_opp(dev_opp);
+       _remove_opp_table(opp_table);
 }
 
 /**
- * dev_pm_opp_remove()  - Remove an OPP from OPP list
+ * dev_pm_opp_remove()  - Remove an OPP from OPP table
  * @dev:       device for which we do this operation
  * @freq:      OPP to remove with matching 'freq'
  *
- * This function removes an opp from the opp list.
+ * This function removes an opp from the opp table.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -618,17 +926,17 @@ static void _opp_remove(struct device_opp *dev_opp,
 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 {
        struct dev_pm_opp *opp;
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        bool found = false;
 
-       /* Hold our list modification lock here */
-       mutex_lock(&dev_opp_list_lock);
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
 
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp))
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table))
                goto unlock;
 
-       list_for_each_entry(opp, &dev_opp->opp_list, node) {
+       list_for_each_entry(opp, &opp_table->opp_list, node) {
                if (opp->rate == freq) {
                        found = true;
                        break;
@@ -641,14 +949,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
                goto unlock;
        }
 
-       _opp_remove(dev_opp, opp, true);
+       _opp_remove(opp_table, opp, true);
 unlock:
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
 static struct dev_pm_opp *_allocate_opp(struct device *dev,
-                                       struct device_opp **dev_opp)
+                                       struct opp_table **opp_table)
 {
        struct dev_pm_opp *opp;
 
@@ -659,8 +967,8 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
 
        INIT_LIST_HEAD(&opp->node);
 
-       *dev_opp = _add_device_opp(dev);
-       if (!*dev_opp) {
+       *opp_table = _add_opp_table(dev);
+       if (!*opp_table) {
                kfree(opp);
                return NULL;
        }
@@ -668,21 +976,38 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
        return opp;
 }
 
+static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+                                        struct opp_table *opp_table)
+{
+       struct regulator *reg = opp_table->regulator;
+
+       if (!IS_ERR(reg) &&
+           !regulator_is_supported_voltage(reg, opp->u_volt_min,
+                                           opp->u_volt_max)) {
+               pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+                       __func__, opp->u_volt_min, opp->u_volt_max);
+               return false;
+       }
+
+       return true;
+}
+
 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
-                   struct device_opp *dev_opp)
+                   struct opp_table *opp_table)
 {
        struct dev_pm_opp *opp;
-       struct list_head *head = &dev_opp->opp_list;
+       struct list_head *head = &opp_table->opp_list;
+       int ret;
 
        /*
         * Insert new OPP in order of increasing frequency and discard if
         * already present.
         *
-        * Need to use &dev_opp->opp_list in the condition part of the 'for'
+        * Need to use &opp_table->opp_list in the condition part of the 'for'
         * loop, don't replace it with head otherwise it will become an infinite
         * loop.
         */
-       list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+       list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
                if (new_opp->rate > opp->rate) {
                        head = &opp->node;
                        continue;
@@ -700,9 +1025,20 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
                        0 : -EEXIST;
        }
 
-       new_opp->dev_opp = dev_opp;
+       new_opp->opp_table = opp_table;
        list_add_rcu(&new_opp->node, head);
 
+       ret = opp_debug_create_one(new_opp, opp_table);
+       if (ret)
+               dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
+                       __func__, ret);
+
+       if (!_opp_supported_by_regulators(new_opp, opp_table)) {
+               new_opp->available = false;
+               dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
+                        __func__, new_opp->rate);
+       }
+
        return 0;
 }
 
@@ -713,14 +1049,14 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
  * @u_volt:    Voltage in uVolts for this OPP
  * @dynamic:   Dynamically added OPPs.
  *
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
  * The opp is made available by default and it can be controlled using
  * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
  *
  * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
  * and freed by dev_pm_opp_of_remove_table.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -736,14 +1072,15 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
 static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
                       bool dynamic)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct dev_pm_opp *new_opp;
+       unsigned long tol;
        int ret;
 
-       /* Hold our list modification lock here */
-       mutex_lock(&dev_opp_list_lock);
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
 
-       new_opp = _allocate_opp(dev, &dev_opp);
+       new_opp = _allocate_opp(dev, &opp_table);
        if (!new_opp) {
                ret = -ENOMEM;
                goto unlock;
@@ -751,60 +1088,77 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
 
        /* populate the opp table */
        new_opp->rate = freq;
+       tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
        new_opp->u_volt = u_volt;
+       new_opp->u_volt_min = u_volt - tol;
+       new_opp->u_volt_max = u_volt + tol;
        new_opp->available = true;
        new_opp->dynamic = dynamic;
 
-       ret = _opp_add(dev, new_opp, dev_opp);
+       ret = _opp_add(dev, new_opp, opp_table);
        if (ret)
                goto free_opp;
 
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
 
        /*
         * Notify the changes in the availability of the operable
         * frequency/voltage list.
         */
-       srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+       srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
        return 0;
 
 free_opp:
-       _opp_remove(dev_opp, new_opp, false);
+       _opp_remove(opp_table, new_opp, false);
 unlock:
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
        return ret;
 }
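
The same v1 path is reachable from drivers through dev_pm_opp_add(); a short sketch with made-up frequency/voltage pairs:

#include <linux/device.h>
#include <linux/pm_opp.h>

/* Hypothetical: register two dynamic OPPs, 500 MHz @ 1.10 V and
 * 1 GHz @ 1.25 V. Frequencies are in Hz, voltages in microvolts. */
static int foo_register_opps(struct device *dev)
{
        int ret;

        ret = dev_pm_opp_add(dev, 500000000, 1100000);
        if (ret)
                return ret;

        return dev_pm_opp_add(dev, 1000000000, 1250000);
}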
 
 /* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+                             struct opp_table *opp_table)
 {
        u32 microvolt[3] = {0};
        u32 val;
        int count, ret;
+       struct property *prop = NULL;
+       char name[NAME_MAX];
+
+       /* Search for "opp-microvolt-<name>" */
+       if (opp_table->prop_name) {
+               snprintf(name, sizeof(name), "opp-microvolt-%s",
+                        opp_table->prop_name);
+               prop = of_find_property(opp->np, name, NULL);
+       }
 
-       /* Missing property isn't a problem, but an invalid entry is */
-       if (!of_find_property(opp->np, "opp-microvolt", NULL))
-               return 0;
+       if (!prop) {
+               /* Search for "opp-microvolt" */
+               sprintf(name, "opp-microvolt");
+               prop = of_find_property(opp->np, name, NULL);
 
-       count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+               /* Missing property isn't a problem, but an invalid entry is */
+               if (!prop)
+                       return 0;
+       }
+
+       count = of_property_count_u32_elems(opp->np, name);
        if (count < 0) {
-               dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
-                       __func__, count);
+               dev_err(dev, "%s: Invalid %s property (%d)\n",
+                       __func__, name, count);
                return count;
        }
 
        /* There can be one or three elements here */
        if (count != 1 && count != 3) {
-               dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
-                       __func__, count);
+               dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+                       __func__, name, count);
                return -EINVAL;
        }
 
-       ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
-                                        count);
+       ret = of_property_read_u32_array(opp->np, name, microvolt, count);
        if (ret) {
-               dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
-                       ret);
+               dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
                return -EINVAL;
        }
 
@@ -818,22 +1172,391 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
                opp->u_volt_max = microvolt[2];
        }
 
-       if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+       /* Search for "opp-microamp-<name>" */
+       prop = NULL;
+       if (opp_table->prop_name) {
+               snprintf(name, sizeof(name), "opp-microamp-%s",
+                        opp_table->prop_name);
+               prop = of_find_property(opp->np, name, NULL);
+       }
+
+       if (!prop) {
+               /* Search for "opp-microamp" */
+               sprintf(name, "opp-microamp");
+               prop = of_find_property(opp->np, name, NULL);
+       }
+
+       if (prop && !of_property_read_u32(opp->np, name, &val))
                opp->u_amp = val;
 
        return 0;
 }
 
+/**
+ * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * @dev: Device for which supported-hw has to be set.
+ * @versions: Array of hierarchy of versions to match.
+ * @count: Number of elements in the array.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the hierarchy of versions it supports. The OPP layer will then
+ * enable only the OPPs that are available for those versions, based on their
+ * 'opp-supported-hw' property.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+                               unsigned int count)
+{
+       struct opp_table *opp_table;
+       int ret = 0;
+
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
+
+       opp_table = _add_opp_table(dev);
+       if (!opp_table) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       /* Make sure there are no concurrent readers while updating opp_table */
+       WARN_ON(!list_empty(&opp_table->opp_list));
+
+       /* Do we already have a version hierarchy associated with opp_table? */
+       if (opp_table->supported_hw) {
+               dev_err(dev, "%s: Already have supported hardware list\n",
+                       __func__);
+               ret = -EBUSY;
+               goto err;
+       }
+
+       opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
+                                       GFP_KERNEL);
+       if (!opp_table->supported_hw) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       opp_table->supported_hw_count = count;
+       mutex_unlock(&opp_table_lock);
+       return 0;
+
+err:
+       _remove_opp_table(opp_table);
+unlock:
+       mutex_unlock(&opp_table_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
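
A sketch of the intended calling pattern, assuming a hypothetical SoC that encodes a two-level hierarchy (speed bin, then process revision) as bit masks:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

/* Hypothetical: this chip is speed-bin 2, process revision 1. Each OPP
 * node's opp-supported-hw cells are ANDed against these masks, level by
 * level, when the static OPPs are parsed. */
static const u32 foo_hw_version[] = { BIT(2), BIT(1) };

static int foo_set_versions(struct device *dev)
{
        return dev_pm_opp_set_supported_hw(dev, foo_hw_version,
                                           ARRAY_SIZE(foo_hw_version));
}

This has to run before dev_pm_opp_of_add_table() so the filter is in place while the table is built, and is undone with dev_pm_opp_put_supported_hw() once the table has been removed.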
+
+/**
+ * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @dev: Device for which supported-hw has to be put.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
+ * will not be freed.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_supported_hw(struct device *dev)
+{
+       struct opp_table *opp_table;
+
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
+
+       /* Check for existing table for 'dev' first */
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               dev_err(dev, "Failed to find opp_table: %ld\n",
+                       PTR_ERR(opp_table));
+               goto unlock;
+       }
+
+       /* Make sure there are no concurrent readers while updating opp_table */
+       WARN_ON(!list_empty(&opp_table->opp_list));
+
+       if (!opp_table->supported_hw) {
+               dev_err(dev, "%s: Doesn't have supported hardware list\n",
+                       __func__);
+               goto unlock;
+       }
+
+       kfree(opp_table->supported_hw);
+       opp_table->supported_hw = NULL;
+       opp_table->supported_hw_count = 0;
+
+       /* Try freeing opp_table if this was the last blocking resource */
+       _remove_opp_table(opp_table);
+
+unlock:
+       mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
+
+/**
+ * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * @dev: Device for which the prop-name has to be set.
+ * @name: name to postfix to properties.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the extn to be used for certain property names. The properties to
+ * which the extension will apply are opp-microvolt and opp-microamp. OPP core
+ * should postfix the property name with -<name> while looking for them.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+       struct opp_table *opp_table;
+       int ret = 0;
+
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
+
+       opp_table = _add_opp_table(dev);
+       if (!opp_table) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       /* Make sure there are no concurrent readers while updating opp_table */
+       WARN_ON(!list_empty(&opp_table->opp_list));
+
+       /* Do we already have a prop-name associated with opp_table? */
+       if (opp_table->prop_name) {
+               dev_err(dev, "%s: Already have prop-name %s\n", __func__,
+                       opp_table->prop_name);
+               ret = -EBUSY;
+               goto err;
+       }
+
+       opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+       if (!opp_table->prop_name) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       mutex_unlock(&opp_table_lock);
+       return 0;
+
+err:
+       _remove_opp_table(opp_table);
+unlock:
+       mutex_unlock(&opp_table_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
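
A sketch, assuming a hypothetical platform that reads a fused speed bin and wants the "-speed0" variants of the per-OPP properties picked up:

#include <linux/device.h>
#include <linux/pm_opp.h>

/* Hypothetical: makes opp_parse_supplies() try "opp-microvolt-speed0"
 * and "opp-microamp-speed0" before the plain property names. Like
 * dev_pm_opp_set_supported_hw(), this must run before OPPs are added. */
static int foo_set_bin_name(struct device *dev)
{
        return dev_pm_opp_set_prop_name(dev, "speed0");
}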
+
+/**
+ * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
+ * @dev: Device for which the prop-name has to be put.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
+ * will not be freed.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_prop_name(struct device *dev)
+{
+       struct opp_table *opp_table;
+
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
+
+       /* Check for existing table for 'dev' first */
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               dev_err(dev, "Failed to find opp_table: %ld\n",
+                       PTR_ERR(opp_table));
+               goto unlock;
+       }
+
+       /* Make sure there are no concurrent readers while updating opp_table */
+       WARN_ON(!list_empty(&opp_table->opp_list));
+
+       if (!opp_table->prop_name) {
+               dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
+               goto unlock;
+       }
+
+       kfree(opp_table->prop_name);
+       opp_table->prop_name = NULL;
+
+       /* Try freeing opp_table if this was the last blocking resource */
+       _remove_opp_table(opp_table);
+
+unlock:
+       mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
+
+/**
+ * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * @dev: Device for which regulator name is being set.
+ * @name: Name of the regulator.
+ *
+ * In order to support OPP switching, OPP layer needs to know the name of the
+ * device's regulator, as the core would be required to switch voltages as well.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+       struct opp_table *opp_table;
+       struct regulator *reg;
+       int ret;
+
+       mutex_lock(&opp_table_lock);
+
+       opp_table = _add_opp_table(dev);
+       if (!opp_table) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       /* This should be called before OPPs are initialized */
+       if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+               ret = -EBUSY;
+               goto err;
+       }
+
+       /* Already have a regulator set */
+       if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+               ret = -EBUSY;
+               goto err;
+       }
+       /* Allocate the regulator */
+       reg = regulator_get_optional(dev, name);
+       if (IS_ERR(reg)) {
+               ret = PTR_ERR(reg);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "%s: no regulator (%s) found: %d\n",
+                               __func__, name, ret);
+               goto err;
+       }
+
+       opp_table->regulator = reg;
+
+       mutex_unlock(&opp_table_lock);
+       return 0;
+
+err:
+       _remove_opp_table(opp_table);
+unlock:
+       mutex_unlock(&opp_table_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
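
A sketch of the set/put pairing around table creation, assuming a hypothetical supply named "vdd" in the device's DT node:

#include <linux/device.h>
#include <linux/pm_opp.h>

static int foo_opp_init(struct device *dev)
{
        int ret;

        /* Must precede OPP registration so voltages can be validated */
        ret = dev_pm_opp_set_regulator(dev, "vdd");
        if (ret)
                return ret;

        ret = dev_pm_opp_of_add_table(dev);
        if (ret)
                dev_pm_opp_put_regulator(dev);

        return ret;
}

On teardown the order reverses: dev_pm_opp_of_remove_table() first, then dev_pm_opp_put_regulator().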
+
+/**
+ * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
+ * @dev: Device for which regulator was set.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulator(struct device *dev)
+{
+       struct opp_table *opp_table;
+
+       mutex_lock(&opp_table_lock);
+
+       /* Check for existing table for 'dev' first */
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               dev_err(dev, "Failed to find opp_table: %ld\n",
+                       PTR_ERR(opp_table));
+               goto unlock;
+       }
+
+       if (IS_ERR(opp_table->regulator)) {
+               dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+               goto unlock;
+       }
+
+       /* Make sure there are no concurrent readers while updating opp_table */
+       WARN_ON(!list_empty(&opp_table->opp_list));
+
+       regulator_put(opp_table->regulator);
+       opp_table->regulator = ERR_PTR(-ENXIO);
+
+       /* Try freeing opp_table if this was the last blocking resource */
+       _remove_opp_table(opp_table);
+
+unlock:
+       mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
+                             struct device_node *np)
+{
+       unsigned int count = opp_table->supported_hw_count;
+       u32 version;
+       int ret;
+
+       if (!opp_table->supported_hw)
+               return true;
+
+       while (count--) {
+               ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+                                                &version);
+               if (ret) {
+                       dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+                                __func__, count, ret);
+                       return false;
+               }
+
+               /* Both of these are bitwise masks of the versions */
+               if (!(version & opp_table->supported_hw[count]))
+                       return false;
+       }
+
+       return true;
+}
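
Distilled, the test ANDs the driver's mask with the DT value at the same index and requires every level to intersect; a stand-alone equivalent (hypothetical helper, for illustration only):

#include <linux/types.h>

/* True only if every hierarchy level shares at least one set bit. */
static bool foo_hw_matches(const u32 *driver_mask, const u32 *dt_val,
                           unsigned int count)
{
        while (count--)
                if (!(dt_val[count] & driver_mask[count]))
                        return false;

        return true;
}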
+
 /**
  * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
  * @dev:       device for which we do this operation
  * @np:                device node
  *
- * This function adds an opp definition to the opp list and returns status. The
+ * This function adds an opp definition to the opp table and returns status. The
  * opp can be controlled using dev_pm_opp_enable/disable functions and may be
  * removed by dev_pm_opp_remove.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -849,16 +1572,16 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
  */
 static int _opp_add_static_v2(struct device *dev, struct device_node *np)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct dev_pm_opp *new_opp;
        u64 rate;
        u32 val;
        int ret;
 
-       /* Hold our list modification lock here */
-       mutex_lock(&dev_opp_list_lock);
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
 
-       new_opp = _allocate_opp(dev, &dev_opp);
+       new_opp = _allocate_opp(dev, &opp_table);
        if (!new_opp) {
                ret = -ENOMEM;
                goto unlock;
@@ -870,6 +1593,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
                goto free_opp;
        }
 
+       /* Check if the OPP supports hardware's hierarchy of versions or not */
+       if (!_opp_is_supported(dev, opp_table, np)) {
+               dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+               goto free_opp;
+       }
+
        /*
         * Rate is defined as an unsigned long in clk API, and so casting
         * explicitly to its type. Must be fixed once rate is 64 bit
@@ -885,28 +1614,30 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
        if (!of_property_read_u32(np, "clock-latency-ns", &val))
                new_opp->clock_latency_ns = val;
 
-       ret = opp_parse_supplies(new_opp, dev);
+       ret = opp_parse_supplies(new_opp, dev, opp_table);
        if (ret)
                goto free_opp;
 
-       ret = _opp_add(dev, new_opp, dev_opp);
+       ret = _opp_add(dev, new_opp, opp_table);
        if (ret)
                goto free_opp;
 
        /* OPP to select on device suspend */
        if (of_property_read_bool(np, "opp-suspend")) {
-               if (dev_opp->suspend_opp)
+               if (opp_table->suspend_opp) {
                        dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
-                                __func__, dev_opp->suspend_opp->rate,
+                                __func__, opp_table->suspend_opp->rate,
                                 new_opp->rate);
-               else
-                       dev_opp->suspend_opp = new_opp;
+               } else {
+                       new_opp->suspend = true;
+                       opp_table->suspend_opp = new_opp;
+               }
        }
 
-       if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
-               dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+       if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+               opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
 
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
 
        pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
                 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
@@ -917,13 +1648,13 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
         * Notify the changes in the availability of the operable
         * frequency/voltage list.
         */
-       srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+       srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
        return 0;
 
 free_opp:
-       _opp_remove(dev_opp, new_opp, false);
+       _opp_remove(opp_table, new_opp, false);
 unlock:
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
        return ret;
 }
 
@@ -933,11 +1664,11 @@ unlock:
  * @freq:      Frequency in Hz for this OPP
  * @u_volt:    Voltage in uVolts for this OPP
  *
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
  * The opp is made available by default and it can be controlled using
  * dev_pm_opp_enable/disable functions.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -969,7 +1700,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
  * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks to
  * keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -978,7 +1709,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
 static int _opp_set_availability(struct device *dev, unsigned long freq,
                                 bool availability_req)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
        int r = 0;
 
@@ -987,18 +1718,18 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
        if (!new_opp)
                return -ENOMEM;
 
-       mutex_lock(&dev_opp_list_lock);
+       mutex_lock(&opp_table_lock);
 
-       /* Find the device_opp */
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp)) {
-               r = PTR_ERR(dev_opp);
+       /* Find the opp_table */
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               r = PTR_ERR(opp_table);
                dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
                goto unlock;
        }
 
        /* Do we have the frequency? */
-       list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+       list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
                if (tmp_opp->rate == freq) {
                        opp = tmp_opp;
                        break;
@@ -1019,21 +1750,21 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
        new_opp->available = availability_req;
 
        list_replace_rcu(&opp->node, &new_opp->node);
-       mutex_unlock(&dev_opp_list_lock);
-       call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+       mutex_unlock(&opp_table_lock);
+       call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 
        /* Notify the change of the OPP availability */
        if (availability_req)
-               srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
-                                        new_opp);
+               srcu_notifier_call_chain(&opp_table->srcu_head,
+                                        OPP_EVENT_ENABLE, new_opp);
        else
-               srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
-                                        new_opp);
+               srcu_notifier_call_chain(&opp_table->srcu_head,
+                                        OPP_EVENT_DISABLE, new_opp);
 
        return 0;
 
 unlock:
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
        kfree(new_opp);
        return r;
 }
@@ -1047,7 +1778,7 @@ unlock:
  * corresponding error value. It is meant to be used by users to make an OPP
  * available after it was temporarily made unavailable with dev_pm_opp_disable.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
  * integrity of the internal data structures. Callers should ensure that
  * this function is *NOT* called under RCU protection or in contexts where
@@ -1073,7 +1804,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  * control by users to make this OPP not available until the circumstances are
  * right to make it available again (with a call to dev_pm_opp_enable).
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
  * integrity of the internal data structures. Callers should ensure that
  * this function is *NOT* called under RCU protection or in contexts where
@@ -1091,26 +1822,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
 
 /**
  * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
- * @dev:       device pointer used to lookup device OPPs.
+ * @dev:       device pointer used to lookup OPP table.
  *
  * Return: pointer to notifier head if found, otherwise -ENODEV or
  * -EINVAL based on type of error cast as pointer. The value must be checked
  * with IS_ERR to determine valid pointer or error result.
  *
- * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * Locking: This function must be called under rcu_read_lock(). opp_table is a
+ * RCU protected pointer. The reason for the same is that the opp pointer which
+ * is returned will remain valid for use with opp_get_{voltage, freq} only while
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
 {
-       struct device_opp *dev_opp = _find_device_opp(dev);
+       struct opp_table *opp_table = _find_opp_table(dev);
 
-       if (IS_ERR(dev_opp))
-               return ERR_CAST(dev_opp); /* matching type */
+       if (IS_ERR(opp_table))
+               return ERR_CAST(opp_table); /* matching type */
 
-       return &dev_opp->srcu_head;
+       return &opp_table->srcu_head;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
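
A registration sketch mirroring how in-tree users consume this: the lookup runs under rcu_read_lock(), while the (sleeping) SRCU registration happens afterwards. All names are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int foo_opp_event(struct notifier_block *nb, unsigned long event,
                         void *data)
{
        /* event is OPP_EVENT_ADD/REMOVE/ENABLE/DISABLE, data the OPP */
        return NOTIFY_OK;
}

static struct notifier_block foo_opp_nb = {
        .notifier_call = foo_opp_event,
};

static int foo_watch_opps(struct device *dev)
{
        struct srcu_notifier_head *nh;
        int ret = 0;

        rcu_read_lock();
        nh = dev_pm_opp_get_notifier(dev);
        if (IS_ERR(nh))
                ret = PTR_ERR(nh);
        rcu_read_unlock();

        if (!ret)
                ret = srcu_notifier_chain_register(nh, &foo_opp_nb);

        return ret;
}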
 
@@ -1118,11 +1849,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 /**
  * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
  *                               entries
- * @dev:       device pointer used to lookup device OPPs.
+ * @dev:       device pointer used to lookup OPP table.
  *
  * Free OPPs created using static entries present in DT.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
@@ -1130,38 +1861,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
  */
 void dev_pm_opp_of_remove_table(struct device *dev)
 {
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct dev_pm_opp *opp, *tmp;
 
-       /* Hold our list modification lock here */
-       mutex_lock(&dev_opp_list_lock);
+       /* Hold our table modification lock here */
+       mutex_lock(&opp_table_lock);
 
-       /* Check for existing list for 'dev' */
-       dev_opp = _find_device_opp(dev);
-       if (IS_ERR(dev_opp)) {
-               int error = PTR_ERR(dev_opp);
+       /* Check for existing table for 'dev' */
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               int error = PTR_ERR(opp_table);
 
                if (error != -ENODEV)
-                       WARN(1, "%s: dev_opp: %d\n",
+                       WARN(1, "%s: opp_table: %d\n",
                             IS_ERR_OR_NULL(dev) ?
                                        "Invalid device" : dev_name(dev),
                             error);
                goto unlock;
        }
 
-       /* Find if dev_opp manages a single device */
-       if (list_is_singular(&dev_opp->dev_list)) {
+       /* Find if opp_table manages a single device */
+       if (list_is_singular(&opp_table->dev_list)) {
                /* Free static OPPs */
-               list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+               list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
                        if (!opp->dynamic)
-                               _opp_remove(dev_opp, opp, true);
+                               _opp_remove(opp_table, opp, true);
                }
        } else {
-               _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+               _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
        }
 
 unlock:
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
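
A typical pairing in a driver's cleanup path (hypothetical name), matching the dev_pm_opp_of_add_table() call made at probe time:

#include <linux/device.h>
#include <linux/pm_opp.h>

static void foo_teardown(struct device *dev)
{
        /* Drops this device's static DT OPPs; dynamic OPPs survive, and
         * a shared table only loses this device's entry. */
        dev_pm_opp_of_remove_table(dev);
}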
 
@@ -1182,22 +1913,22 @@ struct device_node *_of_get_opp_desc_node(struct device *dev)
 static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
 {
        struct device_node *np;
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        int ret = 0, count = 0;
 
-       mutex_lock(&dev_opp_list_lock);
+       mutex_lock(&opp_table_lock);
 
-       dev_opp = _managed_opp(opp_np);
-       if (dev_opp) {
+       opp_table = _managed_opp(opp_np);
+       if (opp_table) {
                /* OPPs are already managed */
-               if (!_add_list_dev(dev, dev_opp))
+               if (!_add_opp_dev(dev, opp_table))
                        ret = -ENOMEM;
-               mutex_unlock(&dev_opp_list_lock);
+               mutex_unlock(&opp_table_lock);
                return ret;
        }
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
 
-       /* We have opp-list node now, iterate over it and add OPPs */
+       /* We have opp-table node now, iterate over it and add OPPs */
        for_each_available_child_of_node(opp_np, np) {
                count++;
 
@@ -1213,19 +1944,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
        if (WARN_ON(!count))
                return -ENOENT;
 
-       mutex_lock(&dev_opp_list_lock);
+       mutex_lock(&opp_table_lock);
 
-       dev_opp = _find_device_opp(dev);
-       if (WARN_ON(IS_ERR(dev_opp))) {
-               ret = PTR_ERR(dev_opp);
-               mutex_unlock(&dev_opp_list_lock);
+       opp_table = _find_opp_table(dev);
+       if (WARN_ON(IS_ERR(opp_table))) {
+               ret = PTR_ERR(opp_table);
+               mutex_unlock(&opp_table_lock);
                goto free_table;
        }
 
-       dev_opp->np = opp_np;
-       dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+       opp_table->np = opp_np;
+       opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
 
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
 
        return 0;
 
@@ -1254,7 +1985,7 @@ static int _of_add_opp_table_v1(struct device *dev)
         */
        nr = prop->length / sizeof(u32);
        if (nr % 2) {
-               dev_err(dev, "%s: Invalid OPP list\n", __func__);
+               dev_err(dev, "%s: Invalid OPP table\n", __func__);
                return -EINVAL;
        }
 
@@ -1274,11 +2005,11 @@ static int _of_add_opp_table_v1(struct device *dev)
 
 /**
  * dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev:       device pointer used to lookup device OPPs.
+ * @dev:       device pointer used to lookup OPP table.
  *
  * Register the initial OPP table with the OPP library for given device.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
index 7b445e88a0d559f091918be5e841f6a1380a33da..ba2bdbd932ef3c1ebaff47c6203bddb27fe9c03b 100644 (file)
@@ -31,7 +31,7 @@
  * @table:     Cpufreq table returned back to caller
  *
  * Generate a cpufreq table for a provided device - this assumes that the
- * opp list is already initialized and ready for usage.
+ * opp table is already initialized and ready for usage.
  *
  * This function allocates required memory for the cpufreq table. It is
  * expected that the caller does the required maintenance such as freeing
@@ -44,7 +44,7 @@
  * WARNING: It is important for the callers to ensure refreshing their copy of
  * the table if any of the mentioned functions have been invoked in the interim.
  *
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
  * Since we just use the regular accessor functions to access the internal data
  * structures, we use RCU read lock inside this function. As a result, users of
  * this function DO NOT need to use explicit locks for invoking.
@@ -122,15 +122,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
 /* Required only for V1 bindings, as v2 can manage it from DT itself */
 int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
 {
-       struct device_list_opp *list_dev;
-       struct device_opp *dev_opp;
+       struct opp_device *opp_dev;
+       struct opp_table *opp_table;
        struct device *dev;
        int cpu, ret = 0;
 
-       mutex_lock(&dev_opp_list_lock);
+       mutex_lock(&opp_table_lock);
 
-       dev_opp = _find_device_opp(cpu_dev);
-       if (IS_ERR(dev_opp)) {
+       opp_table = _find_opp_table(cpu_dev);
+       if (IS_ERR(opp_table)) {
                ret = -EINVAL;
                goto unlock;
        }
@@ -146,15 +146,15 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
                        continue;
                }
 
-               list_dev = _add_list_dev(dev, dev_opp);
-               if (!list_dev) {
-                       dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+               opp_dev = _add_opp_dev(dev, opp_table);
+               if (!opp_dev) {
+                       dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
                                __func__, cpu);
                        continue;
                }
        }
 unlock:
-       mutex_unlock(&dev_opp_list_lock);
+       mutex_unlock(&opp_table_lock);
 
        return ret;
 }
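
With v1 bindings the sharing relationship cannot come from DT, so a platform driver spells it out; a sketch assuming a hypothetical two-CPU cluster:

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>

static int foo_share_opps(struct device *cpu_dev)
{
        cpumask_var_t mask;
        int ret;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        /* CPUs 0 and 1 sit in one clock/voltage domain (made up) */
        cpumask_set_cpu(0, mask);
        cpumask_set_cpu(1, mask);

        ret = dev_pm_opp_set_sharing_cpus(cpu_dev, mask);
        free_cpumask_var(mask);

        return ret;
}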
@@ -214,7 +214,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
 /*
  * Works only for OPP v2 bindings.
  *
- * cpumask should be already set to mask of cpu_dev->id.
  * Returns -ENOENT if operating-points-v2 bindings aren't supported.
  */
 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
@@ -230,6 +229,8 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask
                return -ENOENT;
        }
 
+       cpumask_set_cpu(cpu_dev->id, cpumask);
+
        /* OPPs are shared ? */
        if (!of_property_read_bool(np, "opp-shared"))
                goto put_cpu_node;
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
new file mode 100644 (file)
index 0000000..ef1ae6b
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Generic OPP debugfs interface
+ *
+ * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/limits.h>
+
+#include "opp.h"
+
+static struct dentry *rootdir;
+
+static void opp_set_dev_name(const struct device *dev, char *name)
+{
+       if (dev->parent)
+               snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent),
+                        dev_name(dev));
+       else
+               snprintf(name, NAME_MAX, "%s", dev_name(dev));
+}
+
+void opp_debug_remove_one(struct dev_pm_opp *opp)
+{
+       debugfs_remove_recursive(opp->dentry);
+}
+
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+{
+       struct dentry *pdentry = opp_table->dentry;
+       struct dentry *d;
+       char name[25];  /* 20 chars for 64 bit value + 5 (opp:\0) */
+
+       /* Rate is unique to each OPP, use it to give opp-name */
+       snprintf(name, sizeof(name), "opp:%lu", opp->rate);
+
+       /* Create per-opp directory */
+       d = debugfs_create_dir(name, pdentry);
+       if (!d)
+               return -ENOMEM;
+
+       if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
+               return -ENOMEM;
+
+       if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
+               return -ENOMEM;
+
+       if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
+               return -ENOMEM;
+
+       if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
+               return -ENOMEM;
+
+       if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
+               return -ENOMEM;
+
+       if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
+               return -ENOMEM;
+
+       if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
+               return -ENOMEM;
+
+       if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
+               return -ENOMEM;
+
+       if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+               return -ENOMEM;
+
+       if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+                                 &opp->clock_latency_ns))
+               return -ENOMEM;
+
+       opp->dentry = d;
+       return 0;
+}
+
+static int opp_list_debug_create_dir(struct opp_device *opp_dev,
+                                    struct opp_table *opp_table)
+{
+       const struct device *dev = opp_dev->dev;
+       struct dentry *d;
+
+       opp_set_dev_name(dev, opp_table->dentry_name);
+
+       /* Create device specific directory */
+       d = debugfs_create_dir(opp_table->dentry_name, rootdir);
+       if (!d) {
+               dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
+               return -ENOMEM;
+       }
+
+       opp_dev->dentry = d;
+       opp_table->dentry = d;
+
+       return 0;
+}
+
+static int opp_list_debug_create_link(struct opp_device *opp_dev,
+                                     struct opp_table *opp_table)
+{
+       const struct device *dev = opp_dev->dev;
+       char name[NAME_MAX];
+       struct dentry *d;
+
+       opp_set_dev_name(opp_dev->dev, name);
+
+       /* Create device specific directory link */
+       d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
+       if (!d) {
+               dev_err(dev, "%s: Failed to create link\n", __func__);
+               return -ENOMEM;
+       }
+
+       opp_dev->dentry = d;
+
+       return 0;
+}
+
+/**
+ * opp_debug_register - add a device opp node to the debugfs 'opp' directory
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the device-opp being added
+ *
+ * Dynamically adds device specific directory in debugfs 'opp' directory. If the
+ * device-opp is shared with other devices, then links will be created for all
+ * devices except the first.
+ *
+ * Return: 0 on success, otherwise negative error.
+ */
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+{
+       if (!rootdir) {
+               pr_debug("%s: Uninitialized rootdir\n", __func__);
+               return -EINVAL;
+       }
+
+       if (opp_table->dentry)
+               return opp_list_debug_create_link(opp_dev, opp_table);
+
+       return opp_list_debug_create_dir(opp_dev, opp_table);
+}
+
+static void opp_migrate_dentry(struct opp_device *opp_dev,
+                              struct opp_table *opp_table)
+{
+       struct opp_device *new_dev;
+       const struct device *dev;
+       struct dentry *dentry;
+
+       /* Look for next opp-dev */
+       list_for_each_entry(new_dev, &opp_table->dev_list, node)
+               if (new_dev != opp_dev)
+                       break;
+
+       /* new_dev is guaranteed to be valid here */
+       dev = new_dev->dev;
+       debugfs_remove_recursive(new_dev->dentry);
+
+       opp_set_dev_name(dev, opp_table->dentry_name);
+
+       dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
+                               opp_table->dentry_name);
+       if (!dentry) {
+               dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
+                       __func__, dev_name(opp_dev->dev), dev_name(dev));
+               return;
+       }
+
+       new_dev->dentry = dentry;
+       opp_table->dentry = dentry;
+}
+
+/**
+ * opp_debug_unregister - remove a device opp node from debugfs opp directory
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the device-opp being removed
+ *
+ * Dynamically removes device specific directory from debugfs 'opp' directory.
+ */
+void opp_debug_unregister(struct opp_device *opp_dev,
+                         struct opp_table *opp_table)
+{
+       if (opp_dev->dentry == opp_table->dentry) {
+               /* Move the real dentry object under another device */
+               if (!list_is_singular(&opp_table->dev_list)) {
+                       opp_migrate_dentry(opp_dev, opp_table);
+                       goto out;
+               }
+               opp_table->dentry = NULL;
+       }
+
+       debugfs_remove_recursive(opp_dev->dentry);
+
+out:
+       opp_dev->dentry = NULL;
+}
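The migration above matters when the device owning the real directory goes
away while others still share the table: the next device's symlink is dropped
and the real directory is renamed after it. A rough before/after picture,
again with illustrative names:

	/*
	 * Before unregistering cpu0:
	 *   /sys/kernel/debug/opp/cpu0/
	 *   /sys/kernel/debug/opp/cpu1 -> cpu0
	 *
	 * After opp_debug_unregister() for cpu0:
	 *   /sys/kernel/debug/opp/cpu1/   (renamed, now the real directory)
	 */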
+
+static int __init opp_debug_init(void)
+{
+       /* Create /sys/kernel/debug/opp directory */
+       rootdir = debugfs_create_dir("opp", NULL);
+       if (!rootdir) {
+               pr_err("%s: Failed to create root directory\n", __func__);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+core_initcall(opp_debug_init);
index 7366b2aa8997897f89890cf99a4990d83e61a5a6..f67f806fcf3ae8f13866336cdc54958bd57f59b9 100644 (file)
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/limits.h>
 #include <linux/pm_opp.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
+struct clk;
+struct regulator;
+
 /* Lock to allow exclusive modification to the device and opp lists */
-extern struct mutex dev_opp_list_lock;
+extern struct mutex opp_table_lock;
 
 /*
  * Internal data structure organization with the OPP layer library is as
  * follows:
- * dev_opp_list (root)
+ * opp_tables (root)
  *     |- device 1 (represents voltage domain 1)
  *     |       |- opp 1 (availability, freq, voltage)
  *     |       |- opp 2 ..
@@ -36,23 +40,24 @@ extern struct mutex dev_opp_list_lock;
  *     |- device 2 (represents the next voltage domain)
  *     ...
  *     `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
+ * device 1, 2.. are represented by opp_table structure while each opp
  * is represented by the opp structure.
  */
 
 /**
  * struct dev_pm_opp - Generic OPP description structure
- * @node:      opp list node. The nodes are maintained throughout the lifetime
+ * @node:      opp table node. The nodes are maintained throughout the lifetime
  *             of boot. It is expected only an optimal set of OPPs are
  *             added to the library by the SoC framework.
- *             RCU usage: opp list is traversed with RCU locks. node
+ *             RCU usage: opp table is traversed with RCU locks. node
  *             modification is possible realtime, hence the modifications
- *             are protected by the dev_opp_list_lock for integrity.
+ *             are protected by the opp_table_lock for integrity.
  *             IMPORTANT: the opp nodes should be maintained in increasing
  *             order.
- * @dynamic:   not-created from static DT entries.
  * @available: true/false - marks if this OPP as available or not
+ * @dynamic:   not-created from static DT entries.
  * @turbo:     true if turbo (boost) OPP
+ * @suspend:   true if suspend OPP
  * @rate:      Frequency in hertz
  * @u_volt:    Target voltage in microvolts corresponding to this OPP
  * @u_volt_min:        Minimum voltage in microvolts corresponding to this OPP
@@ -60,9 +65,10 @@ extern struct mutex dev_opp_list_lock;
  * @u_amp:     Maximum current drawn by the device in microamperes
  * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
  *             frequency from any other OPP's frequency.
- * @dev_opp:   points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
  * @rcu_head:  RCU callback head used for deferred freeing
  * @np:                OPP's device node.
+ * @dentry:    debugfs dentry pointer (per opp)
  *
  * This structure stores the OPP information for a given device.
  */
@@ -72,6 +78,7 @@ struct dev_pm_opp {
        bool available;
        bool dynamic;
        bool turbo;
+       bool suspend;
        unsigned long rate;
 
        unsigned long u_volt;
@@ -80,40 +87,60 @@ struct dev_pm_opp {
        unsigned long u_amp;
        unsigned long clock_latency_ns;
 
-       struct device_opp *dev_opp;
+       struct opp_table *opp_table;
        struct rcu_head rcu_head;
 
        struct device_node *np;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *dentry;
+#endif
 };
 
 /**
- * struct device_list_opp - devices managed by 'struct device_opp'
+ * struct opp_device - devices managed by 'struct opp_table'
  * @node:      list node
  * @dev:       device to which the struct object belongs
  * @rcu_head:  RCU callback head used for deferred freeing
+ * @dentry:    debugfs dentry pointer (per device)
  *
- * This is an internal data structure maintaining the list of devices that are
- * managed by 'struct device_opp'.
+ * This is an internal data structure maintaining the devices that are managed
+ * by 'struct opp_table'.
  */
-struct device_list_opp {
+struct opp_device {
        struct list_head node;
        const struct device *dev;
        struct rcu_head rcu_head;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *dentry;
+#endif
 };
 
 /**
- * struct device_opp - Device opp structure
- * @node:      list node - contains the devices with OPPs that
+ * struct opp_table - Device opp structure
+ * @node:      table node - contains the devices with OPPs that
  *             have been registered. Nodes once added are not modified in this
- *             list.
- *             RCU usage: nodes are not modified in the list of device_opp,
- *             however addition is possible and is secured by dev_opp_list_lock
+ *             table.
+ *             RCU usage: nodes are not modified in the table of opp_table,
+ *             however addition is possible and is secured by opp_table_lock
  * @srcu_head: notifier head to notify the OPP availability changes.
  * @rcu_head:  RCU callback head used for deferred freeing
  * @dev_list:  list of devices that share these OPPs
- * @opp_list:  list of opps
+ * @opp_list:  table of opps
  * @np:                struct device_node pointer for opp's DT node.
+ * @clock_latency_ns_max: Max clock latency in nanoseconds.
  * @shared_opp: OPP is shared between multiple devices.
+ * @suspend_opp: Pointer to OPP to be used during device suspend.
+ * @supported_hw: Array of version numbers to support.
+ * @supported_hw_count: Number of elements in supported_hw array.
+ * @prop_name: A name appended to many DT properties while parsing them.
+ * @clk: Device's clock handle
+ * @regulator: Supply regulator
+ * @dentry:    debugfs dentry pointer of the real device directory (not links).
+ * @dentry_name: Name of the real dentry.
+ *
+ * @voltage_tolerance_v1: In percentage, for v1 bindings only.
  *
  * This is an internal data structure maintaining the link to opps attached to
  * a device. This structure is not meant to be shared to users as it is
@@ -123,7 +150,7 @@ struct device_list_opp {
  * need to wait for the grace period of both of them before freeing any
  * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
  */
-struct device_opp {
+struct opp_table {
        struct list_head node;
 
        struct srcu_notifier_head srcu_head;
@@ -133,14 +160,48 @@ struct device_opp {
 
        struct device_node *np;
        unsigned long clock_latency_ns_max;
+
+       /* For backward compatibility with v1 bindings */
+       unsigned int voltage_tolerance_v1;
+
        bool shared_opp;
        struct dev_pm_opp *suspend_opp;
+
+       unsigned int *supported_hw;
+       unsigned int supported_hw_count;
+       const char *prop_name;
+       struct clk *clk;
+       struct regulator *regulator;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *dentry;
+       char dentry_name[NAME_MAX];
+#endif
 };
 
 /* Routines internal to opp core */
-struct device_opp *_find_device_opp(struct device *dev);
-struct device_list_opp *_add_list_dev(const struct device *dev,
-                                     struct device_opp *dev_opp);
+struct opp_table *_find_opp_table(struct device *dev);
+struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
 struct device_node *_of_get_opp_desc_node(struct device *dev);
 
+#ifdef CONFIG_DEBUG_FS
+void opp_debug_remove_one(struct dev_pm_opp *opp);
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
+#else
+static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
+
+static inline int opp_debug_create_one(struct dev_pm_opp *opp,
+                                      struct opp_table *opp_table)
+{ return 0; }
+static inline int opp_debug_register(struct opp_device *opp_dev,
+                                    struct opp_table *opp_table)
+{ return 0; }
+
+static inline void opp_debug_unregister(struct opp_device *opp_dev,
+                                       struct opp_table *opp_table)
+{ }
+#endif         /* DEBUG_FS */
+
 #endif         /* __DRIVER_OPP_H__ */
index 90d64081ddb34ee8ba7a06372a269defdcf07a97..f951f911786e086b2b6dc9d615018eb235347dbe 100644 (file)
@@ -31,9 +31,8 @@
 
 struct private_data {
        struct device *cpu_dev;
-       struct regulator *cpu_reg;
        struct thermal_cooling_device *cdev;
-       unsigned int voltage_tolerance; /* in percentage */
+       const char *reg_name;
 };
 
 static struct freq_attr *cpufreq_dt_attr[] = {
@@ -44,175 +43,128 @@ static struct freq_attr *cpufreq_dt_attr[] = {
 
 static int set_target(struct cpufreq_policy *policy, unsigned int index)
 {
-       struct dev_pm_opp *opp;
-       struct cpufreq_frequency_table *freq_table = policy->freq_table;
-       struct clk *cpu_clk = policy->clk;
        struct private_data *priv = policy->driver_data;
-       struct device *cpu_dev = priv->cpu_dev;
-       struct regulator *cpu_reg = priv->cpu_reg;
-       unsigned long volt = 0, volt_old = 0, tol = 0;
-       unsigned int old_freq, new_freq;
-       long freq_Hz, freq_exact;
-       int ret;
-
-       freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
-       if (freq_Hz <= 0)
-               freq_Hz = freq_table[index].frequency * 1000;
 
-       freq_exact = freq_Hz;
-       new_freq = freq_Hz / 1000;
-       old_freq = clk_get_rate(cpu_clk) / 1000;
+       return dev_pm_opp_set_rate(priv->cpu_dev,
+                                  policy->freq_table[index].frequency * 1000);
+}
 
-       if (!IS_ERR(cpu_reg)) {
-               unsigned long opp_freq;
+/*
+ * An earlier version of opp-v1 bindings used to name the regulator
+ * "cpu0-supply", we still need to handle that for backwards compatibility.
+ */
+static const char *find_supply_name(struct device *dev)
+{
+       struct device_node *np;
+       struct property *pp;
+       int cpu = dev->id;
+       const char *name = NULL;
 
-               rcu_read_lock();
-               opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
-               if (IS_ERR(opp)) {
-                       rcu_read_unlock();
-                       dev_err(cpu_dev, "failed to find OPP for %ld\n",
-                               freq_Hz);
-                       return PTR_ERR(opp);
-               }
-               volt = dev_pm_opp_get_voltage(opp);
-               opp_freq = dev_pm_opp_get_freq(opp);
-               rcu_read_unlock();
-               tol = volt * priv->voltage_tolerance / 100;
-               volt_old = regulator_get_voltage(cpu_reg);
-               dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
-                       opp_freq / 1000, volt);
-       }
+       np = of_node_get(dev->of_node);
 
-       dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
-               old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
-               new_freq / 1000, volt ? volt / 1000 : -1);
+       /* The CPU device node must be present */
+       if (WARN_ON(!np))
+               return NULL;
 
-       /* scaling up?  scale voltage before frequency */
-       if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
-               ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
-               if (ret) {
-                       dev_err(cpu_dev, "failed to scale voltage up: %d\n",
-                               ret);
-                       return ret;
+       /* Try "cpu0" for older DTs */
+       if (!cpu) {
+               pp = of_find_property(np, "cpu0-supply", NULL);
+               if (pp) {
+                       name = "cpu0";
+                       goto node_put;
                }
        }
 
-       ret = clk_set_rate(cpu_clk, freq_exact);
-       if (ret) {
-               dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
-               if (!IS_ERR(cpu_reg) && volt_old > 0)
-                       regulator_set_voltage_tol(cpu_reg, volt_old, tol);
-               return ret;
+       pp = of_find_property(np, "cpu-supply", NULL);
+       if (pp) {
+               name = "cpu";
+               goto node_put;
        }
 
-       /* scaling down?  scale voltage after frequency */
-       if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
-               ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
-               if (ret) {
-                       dev_err(cpu_dev, "failed to scale voltage down: %d\n",
-                               ret);
-                       clk_set_rate(cpu_clk, old_freq * 1000);
-               }
-       }
-
-       return ret;
+       dev_dbg(dev, "no regulator for cpu%d\n", cpu);
+node_put:
+       of_node_put(np);
+       return name;
 }
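For reference, the two supply-naming schemes probed above would appear in a
device tree roughly as follows (the regulator label is made up):

	/*
	 * Old opp-v1 style, CPU0 only:  cpu0-supply = <&cpu_vdd>;
	 * Current style, any CPU:       cpu-supply = <&cpu_vdd>;
	 *
	 * find_supply_name() returns "cpu0" or "cpu" accordingly, and NULL
	 * when neither property exists (the platform needs no regulator).
	 */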
 
-static int allocate_resources(int cpu, struct device **cdev,
-                             struct regulator **creg, struct clk **cclk)
+static int resources_available(void)
 {
        struct device *cpu_dev;
        struct regulator *cpu_reg;
        struct clk *cpu_clk;
        int ret = 0;
-       char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
+       const char *name;
 
-       cpu_dev = get_cpu_device(cpu);
+       cpu_dev = get_cpu_device(0);
        if (!cpu_dev) {
-               pr_err("failed to get cpu%d device\n", cpu);
+               pr_err("failed to get cpu0 device\n");
                return -ENODEV;
        }
 
-       /* Try "cpu0" for older DTs */
-       if (!cpu)
-               reg = reg_cpu0;
-       else
-               reg = reg_cpu;
-
-try_again:
-       cpu_reg = regulator_get_optional(cpu_dev, reg);
-       if (IS_ERR(cpu_reg)) {
+       cpu_clk = clk_get(cpu_dev, NULL);
+       ret = PTR_ERR_OR_ZERO(cpu_clk);
+       if (ret) {
                /*
-                * If cpu's regulator supply node is present, but regulator is
-                * not yet registered, we should try defering probe.
+                * If cpu's clk node is present, but clock is not yet
+                * registered, we should try deferring probe.
                 */
-               if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
-                       dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
-                               cpu);
-                       return -EPROBE_DEFER;
-               }
-
-               /* Try with "cpu-supply" */
-               if (reg == reg_cpu0) {
-                       reg = reg_cpu;
-                       goto try_again;
-               }
+               if (ret == -EPROBE_DEFER)
+                       dev_dbg(cpu_dev, "clock not ready, retry\n");
+               else
+                       dev_err(cpu_dev, "failed to get clock: %d\n", ret);
 
-               dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
-                       cpu, PTR_ERR(cpu_reg));
+               return ret;
        }
 
-       cpu_clk = clk_get(cpu_dev, NULL);
-       if (IS_ERR(cpu_clk)) {
-               /* put regulator */
-               if (!IS_ERR(cpu_reg))
-                       regulator_put(cpu_reg);
+       clk_put(cpu_clk);
 
-               ret = PTR_ERR(cpu_clk);
+       name = find_supply_name(cpu_dev);
+       /* Platform doesn't require regulator */
+       if (!name)
+               return 0;
 
+       cpu_reg = regulator_get_optional(cpu_dev, name);
+       ret = PTR_ERR_OR_ZERO(cpu_reg);
+       if (ret) {
                /*
-                * If cpu's clk node is present, but clock is not yet
-                * registered, we should try defering probe.
+                * If cpu's regulator supply node is present, but regulator is
+                * not yet registered, we should try deferring probe.
                 */
                if (ret == -EPROBE_DEFER)
-                       dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
+                       dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
                else
-                       dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
-                               ret);
-       } else {
-               *cdev = cpu_dev;
-               *creg = cpu_reg;
-               *cclk = cpu_clk;
+                       dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
+
+               return ret;
        }
 
-       return ret;
+       regulator_put(cpu_reg);
+       return 0;
 }
 
 static int cpufreq_init(struct cpufreq_policy *policy)
 {
        struct cpufreq_frequency_table *freq_table;
-       struct device_node *np;
        struct private_data *priv;
        struct device *cpu_dev;
-       struct regulator *cpu_reg;
        struct clk *cpu_clk;
        struct dev_pm_opp *suspend_opp;
-       unsigned long min_uV = ~0, max_uV = 0;
        unsigned int transition_latency;
-       bool need_update = false;
+       bool opp_v1 = false;
+       const char *name;
        int ret;
 
-       ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
-       if (ret) {
-               pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
-               return ret;
+       cpu_dev = get_cpu_device(policy->cpu);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu%d device\n", policy->cpu);
+               return -ENODEV;
        }
 
-       np = of_node_get(cpu_dev->of_node);
-       if (!np) {
-               dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
-               ret = -ENOENT;
-               goto out_put_reg_clk;
+       cpu_clk = clk_get(cpu_dev, NULL);
+       if (IS_ERR(cpu_clk)) {
+               ret = PTR_ERR(cpu_clk);
+               dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
+               return ret;
        }
 
        /* Get OPP-sharing information from "operating-points-v2" bindings */
@@ -223,9 +175,23 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                 * finding shared-OPPs for backward compatibility.
                 */
                if (ret == -ENOENT)
-                       need_update = true;
+                       opp_v1 = true;
                else
-                       goto out_node_put;
+                       goto out_put_clk;
+       }
+
+       /*
+        * OPP layer will be taking care of regulators now, but it needs to know
+        * the name of the regulator first.
+        */
+       name = find_supply_name(cpu_dev);
+       if (name) {
+               ret = dev_pm_opp_set_regulator(cpu_dev, name);
+               if (ret) {
+                       dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
+                               policy->cpu, ret);
+                       goto out_put_clk;
+               }
        }
 
        /*
@@ -246,12 +212,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
         */
        ret = dev_pm_opp_get_opp_count(cpu_dev);
        if (ret <= 0) {
-               pr_debug("OPP table is not ready, deferring probe\n");
+               dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
                ret = -EPROBE_DEFER;
                goto out_free_opp;
        }
 
-       if (need_update) {
+       if (opp_v1) {
                struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
 
                if (!pd || !pd->independent_clocks)
@@ -265,10 +231,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                if (ret)
                        dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                __func__, ret);
-
-               of_property_read_u32(np, "clock-latency", &transition_latency);
-       } else {
-               transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
        }
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -277,62 +239,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                goto out_free_opp;
        }
 
-       of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
-
-       if (!transition_latency)
-               transition_latency = CPUFREQ_ETERNAL;
-
-       if (!IS_ERR(cpu_reg)) {
-               unsigned long opp_freq = 0;
-
-               /*
-                * Disable any OPPs where the connected regulator isn't able to
-                * provide the specified voltage and record minimum and maximum
-                * voltage levels.
-                */
-               while (1) {
-                       struct dev_pm_opp *opp;
-                       unsigned long opp_uV, tol_uV;
-
-                       rcu_read_lock();
-                       opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
-                       if (IS_ERR(opp)) {
-                               rcu_read_unlock();
-                               break;
-                       }
-                       opp_uV = dev_pm_opp_get_voltage(opp);
-                       rcu_read_unlock();
-
-                       tol_uV = opp_uV * priv->voltage_tolerance / 100;
-                       if (regulator_is_supported_voltage(cpu_reg,
-                                                          opp_uV - tol_uV,
-                                                          opp_uV + tol_uV)) {
-                               if (opp_uV < min_uV)
-                                       min_uV = opp_uV;
-                               if (opp_uV > max_uV)
-                                       max_uV = opp_uV;
-                       } else {
-                               dev_pm_opp_disable(cpu_dev, opp_freq);
-                       }
-
-                       opp_freq++;
-               }
-
-               ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
-               if (ret > 0)
-                       transition_latency += ret * 1000;
-       }
+       priv->reg_name = name;
 
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
-               pr_err("failed to init cpufreq table: %d\n", ret);
+               dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto out_free_priv;
        }
 
        priv->cpu_dev = cpu_dev;
-       priv->cpu_reg = cpu_reg;
        policy->driver_data = priv;
-
        policy->clk = cpu_clk;
 
        rcu_read_lock();
@@ -357,9 +273,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
        }
 
-       policy->cpuinfo.transition_latency = transition_latency;
+       transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+       if (!transition_latency)
+               transition_latency = CPUFREQ_ETERNAL;
 
-       of_node_put(np);
+       policy->cpuinfo.transition_latency = transition_latency;
 
        return 0;
 
@@ -369,12 +287,10 @@ out_free_priv:
        kfree(priv);
 out_free_opp:
        dev_pm_opp_of_cpumask_remove_table(policy->cpus);
-out_node_put:
-       of_node_put(np);
-out_put_reg_clk:
+       if (name)
+               dev_pm_opp_put_regulator(cpu_dev);
+out_put_clk:
        clk_put(cpu_clk);
-       if (!IS_ERR(cpu_reg))
-               regulator_put(cpu_reg);
 
        return ret;
 }
@@ -386,9 +302,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
        cpufreq_cooling_unregister(priv->cdev);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
        dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+       if (priv->reg_name)
+               dev_pm_opp_put_regulator(priv->cpu_dev);
+
        clk_put(policy->clk);
-       if (!IS_ERR(priv->cpu_reg))
-               regulator_put(priv->cpu_reg);
        kfree(priv);
 
        return 0;
@@ -407,8 +324,13 @@ static void cpufreq_ready(struct cpufreq_policy *policy)
         * thermal DT code takes care of matching them.
         */
        if (of_find_property(np, "#cooling-cells", NULL)) {
-               priv->cdev = of_cpufreq_cooling_register(np,
-                                                        policy->related_cpus);
+               u32 power_coefficient = 0;
+
+               of_property_read_u32(np, "dynamic-power-coefficient",
+                                    &power_coefficient);
+
+               priv->cdev = of_cpufreq_power_cooling_register(np,
+                               policy->related_cpus, power_coefficient, NULL);
                if (IS_ERR(priv->cdev)) {
                        dev_err(priv->cpu_dev,
                                "running cpufreq without cooling device: %ld\n",
@@ -436,9 +358,6 @@ static struct cpufreq_driver dt_cpufreq_driver = {
 
 static int dt_cpufreq_probe(struct platform_device *pdev)
 {
-       struct device *cpu_dev;
-       struct regulator *cpu_reg;
-       struct clk *cpu_clk;
        int ret;
 
        /*
@@ -448,19 +367,15 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
         *
         * FIXME: Is checking this only for CPU0 sufficient ?
         */
-       ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+       ret = resources_available();
        if (ret)
                return ret;
 
-       clk_put(cpu_clk);
-       if (!IS_ERR(cpu_reg))
-               regulator_put(cpu_reg);
-
        dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
 
        ret = cpufreq_register_driver(&dt_cpufreq_driver);
        if (ret)
-               dev_err(cpu_dev, "failed register driver: %d\n", ret);
+               dev_err(&pdev->dev, "failed register driver: %d\n", ret);
 
        return ret;
 }
index 3c0467d3688cff14df877fea66d61c3fbb3279bd..c4098748e1fe8d5096a7161c2b08e54f80ae1d71 100644 (file)
@@ -8,7 +8,7 @@ cflags-$(CONFIG_X86_32)         := -march=i386
 cflags-$(CONFIG_X86_64)                := -mcmodel=small
 cflags-$(CONFIG_X86)           += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
                                   -fPIC -fno-strict-aliasing -mno-red-zone \
-                                  -mno-mmx -mno-sse -DDISABLE_BRANCH_PROFILING
+                                  -mno-mmx -mno-sse
 
 cflags-$(CONFIG_ARM64)         := $(subst -pg,,$(KBUILD_CFLAGS))
 cflags-$(CONFIG_ARM)           := $(subst -pg,,$(KBUILD_CFLAGS)) \
@@ -16,7 +16,7 @@ cflags-$(CONFIG_ARM)          := $(subst -pg,,$(KBUILD_CFLAGS)) \
 
 cflags-$(CONFIG_EFI_ARMSTUB)   += -I$(srctree)/scripts/dtc/libfdt
 
-KBUILD_CFLAGS                  := $(cflags-y) \
+KBUILD_CFLAGS                  := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
                                   $(call cc-option,-ffreestanding) \
                                   $(call cc-option,-fno-stack-protector)
 
@@ -34,7 +34,8 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
 lib-$(CONFIG_EFI_ARMSTUB)      += arm-stub.o fdt.o string.o \
                                   $(patsubst %.c,lib-%.o,$(arm-deps))
 
-lib-$(CONFIG_ARM64)            += arm64-stub.o
+lib-$(CONFIG_ARM)              += arm32-stub.o
+lib-$(CONFIG_ARM64)            += arm64-stub.o random.o
 CFLAGS_arm64-stub.o            := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
 #
index 950c87f5d279335210088e4154eda135b24304d5..d5aa1d16154f5cb100e865a1bfbb8e0e49fb542c 100644 (file)
@@ -18,6 +18,8 @@
 
 #include "efistub.h"
 
+bool __nokaslr;
+
 static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
 {
        static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
@@ -207,14 +209,6 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
                pr_efi_err(sys_table, "Failed to find DRAM base\n");
                goto fail;
        }
-       status = handle_kernel_image(sys_table, image_addr, &image_size,
-                                    &reserve_addr,
-                                    &reserve_size,
-                                    dram_base, image);
-       if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table, "Failed to relocate kernel\n");
-               goto fail;
-       }
 
        /*
         * Get the command line from EFI, using the LOADED_IMAGE
@@ -224,7 +218,28 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
        if (!cmdline_ptr) {
                pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
-               goto fail_free_image;
+               goto fail;
+       }
+
+       /* check whether 'nokaslr' was passed on the command line */
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+               static const u8 default_cmdline[] = CONFIG_CMDLINE;
+               const u8 *str, *cmdline = cmdline_ptr;
+
+               if (IS_ENABLED(CONFIG_CMDLINE_FORCE))
+                       cmdline = default_cmdline;
+               str = strstr(cmdline, "nokaslr");
+               if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+                       __nokaslr = true;
+       }
+
+       status = handle_kernel_image(sys_table, image_addr, &image_size,
+                                    &reserve_addr,
+                                    &reserve_size,
+                                    dram_base, image);
+       if (status != EFI_SUCCESS) {
+               pr_efi_err(sys_table, "Failed to relocate kernel\n");
+               goto fail_free_cmdline;
        }
 
        status = efi_parse_options(cmdline_ptr);
@@ -244,7 +259,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
 
                if (status != EFI_SUCCESS) {
                        pr_efi_err(sys_table, "Failed to load device tree!\n");
-                       goto fail_free_cmdline;
+                       goto fail_free_image;
                }
        }
 
@@ -286,12 +301,11 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        efi_free(sys_table, initrd_size, initrd_addr);
        efi_free(sys_table, fdt_size, fdt_addr);
 
-fail_free_cmdline:
-       efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
-
 fail_free_image:
        efi_free(sys_table, image_size, *image_addr);
        efi_free(sys_table, reserve_size, reserve_addr);
+fail_free_cmdline:
+       efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
 fail:
        return EFI_ERROR;
 }
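The 'nokaslr' test above accepts the token at the start of the command line or
immediately after a space; note that it does not check what follows the match,
so an argument such as "nokaslrfoo" would also disable KASLR. A standalone,
userspace-runnable sketch of the same predicate, for illustration only:

	#include <stdbool.h>
	#include <string.h>

	static bool cmdline_has_nokaslr(const char *cmdline)
	{
		const char *str = strstr(cmdline, "nokaslr");

		/* match at the very start, or right after a space */
		return str && (str == cmdline || str[-1] == ' ');
	}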
index 78dfbd34b6bffd2fa36312da89dc6ca43f036c3c..e0e6b74fef8f7becdef4c0481919ead298023250 100644 (file)
 #include <asm/efi.h>
 #include <asm/sections.h>
 
+#include "efistub.h"
+
+extern bool __nokaslr;
+
 efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
                                        unsigned long *image_addr,
                                        unsigned long *image_size,
@@ -23,26 +27,52 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 {
        efi_status_t status;
        unsigned long kernel_size, kernel_memsize = 0;
-       unsigned long nr_pages;
        void *old_image_addr = (void *)*image_addr;
        unsigned long preferred_offset;
+       u64 phys_seed = 0;
+
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+               if (!__nokaslr) {
+                       status = efi_get_random_bytes(sys_table_arg,
+                                                     sizeof(phys_seed),
+                                                     (u8 *)&phys_seed);
+                       if (status == EFI_NOT_FOUND) {
+                               pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+                       } else if (status != EFI_SUCCESS) {
+                               pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+                               return status;
+                       }
+               } else {
+                       pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+               }
+       }
 
        /*
         * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
         * a 2 MB aligned base, which itself may be lower than dram_base, as
         * long as the resulting offset equals or exceeds it.
         */
-       preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+       preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;
        if (preferred_offset < dram_base)
-               preferred_offset += SZ_2M;
+               preferred_offset += MIN_KIMG_ALIGN;
 
-       /* Relocate the image, if required. */
        kernel_size = _edata - _text;
-       if (*image_addr != preferred_offset) {
-               kernel_memsize = kernel_size + (_end - _edata);
+       kernel_memsize = kernel_size + (_end - _edata);
+
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+               /*
+                * If KASLR is enabled, and we have some randomness available,
+                * locate the kernel at a randomized offset in physical memory.
+                */
+               *reserve_size = kernel_memsize + TEXT_OFFSET;
+               status = efi_random_alloc(sys_table_arg, *reserve_size,
+                                         MIN_KIMG_ALIGN, reserve_addr,
+                                         phys_seed);
 
+               *image_addr = *reserve_addr + TEXT_OFFSET;
+       } else {
                /*
-                * First, try a straight allocation at the preferred offset.
+                * Else, try a straight allocation at the preferred offset.
                 * This will work around the issue where, if dram_base == 0x0,
                 * efi_low_alloc() refuses to allocate at 0x0 (to prevent the
                 * address of the allocation to be mistaken for a FAIL return
@@ -52,27 +82,31 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
                 * Mustang), we can still place the kernel at the address
                 * 'dram_base + TEXT_OFFSET'.
                 */
+               if (*image_addr == preferred_offset)
+                       return EFI_SUCCESS;
+
                *image_addr = *reserve_addr = preferred_offset;
-               nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
-                          EFI_PAGE_SIZE;
+               *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
+
                status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
-                                       EFI_LOADER_DATA, nr_pages,
+                                       EFI_LOADER_DATA,
+                                       *reserve_size / EFI_PAGE_SIZE,
                                        (efi_physical_addr_t *)reserve_addr);
-               if (status != EFI_SUCCESS) {
-                       kernel_memsize += TEXT_OFFSET;
-                       status = efi_low_alloc(sys_table_arg, kernel_memsize,
-                                              SZ_2M, reserve_addr);
+       }
 
-                       if (status != EFI_SUCCESS) {
-                               pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
-                               return status;
-                       }
-                       *image_addr = *reserve_addr + TEXT_OFFSET;
+       if (status != EFI_SUCCESS) {
+               *reserve_size = kernel_memsize + TEXT_OFFSET;
+               status = efi_low_alloc(sys_table_arg, *reserve_size,
+                                      MIN_KIMG_ALIGN, reserve_addr);
+
+               if (status != EFI_SUCCESS) {
+                       pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+                       *reserve_size = 0;
+                       return status;
                }
-               memcpy((void *)*image_addr, old_image_addr, kernel_size);
-               *reserve_size = kernel_memsize;
+               *image_addr = *reserve_addr + TEXT_OFFSET;
        }
-
+       memcpy((void *)*image_addr, old_image_addr, kernel_size);
 
        return EFI_SUCCESS;
 }
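To make the offset arithmetic concrete, assume the default arm64 TEXT_OFFSET
of 0x80000 and a MIN_KIMG_ALIGN of 2 MiB (both values are assumptions for this
example):

	/*
	 * dram_base = 0x80000000:
	 *   round_down(0x80000000, 2M) + 0x80000 = 0x80080000  >= dram_base, OK
	 *
	 * dram_base = 0x80100000:
	 *   round_down(0x80100000, 2M) + 0x80000 = 0x80080000  < dram_base
	 *   bump by 2M                           = 0x80280000  used instead
	 */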
index f07d4a67fa76b3a3cb542e31a24a093c6f7aff97..29ed2f9b218ca9892bfcc72da2d91ba4750f4c97 100644 (file)
@@ -649,6 +649,10 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
        return dst;
 }
 
+#ifndef MAX_CMDLINE_ADDRESS
+#define MAX_CMDLINE_ADDRESS    ULONG_MAX
+#endif
+
 /*
  * Convert the unicode UEFI command line to ASCII to pass to kernel.
  * Size of memory allocated return in *cmd_line_len.
@@ -684,7 +688,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
 
        options_bytes++;        /* NUL termination */
 
-       status = efi_low_alloc(sys_table_arg, options_bytes, 0, &cmdline_addr);
+       status = efi_high_alloc(sys_table_arg, options_bytes, 0,
+                               &cmdline_addr, MAX_CMDLINE_ADDRESS);
        if (status != EFI_SUCCESS)
                return NULL;
 
index 6b6548fda0895ecb0ca7e9a60699d7100e335566..5ed3d3f3816637cd10d007f381797320f0567305 100644 (file)
@@ -43,4 +43,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
                     unsigned long desc_size, efi_memory_desc_t *runtime_map,
                     int *count);
 
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
+                                 unsigned long size, u8 *out);
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+                             unsigned long size, unsigned long align,
+                             unsigned long *addr, unsigned long random_seed);
+
 #endif
index b62e2f5dcab3b2d95074b534de803915145dc20c..b1c22cf18f7d39f531bca06d99ac44ff1d6a9e32 100644 (file)
@@ -147,6 +147,20 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        if (status)
                goto fdt_set_fail;
 
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+               efi_status_t efi_status;
+
+               efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+                                                 (u8 *)&fdt_val64);
+               if (efi_status == EFI_SUCCESS) {
+                       status = fdt_setprop(fdt, node, "kaslr-seed",
+                                            &fdt_val64, sizeof(fdt_val64));
+                       if (status)
+                               goto fdt_set_fail;
+               } else if (efi_status != EFI_NOT_FOUND) {
+                       return efi_status;
+               }
+       }
        return EFI_SUCCESS;
 
 fdt_set_fail:
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
new file mode 100644 (file)
index 0000000..53f6d3f
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd;  <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+struct efi_rng_protocol {
+       efi_status_t (*get_info)(struct efi_rng_protocol *,
+                                unsigned long *, efi_guid_t *);
+       efi_status_t (*get_rng)(struct efi_rng_protocol *,
+                               efi_guid_t *, unsigned long, u8 *out);
+};
+
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
+                                 unsigned long size, u8 *out)
+{
+       efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+       efi_status_t status;
+       struct efi_rng_protocol *rng;
+
+       status = efi_call_early(locate_protocol, &rng_proto, NULL,
+                               (void **)&rng);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       return rng->get_rng(rng, NULL, size, out);
+}
+
+/*
+ * Return the number of slots covered by this entry, i.e., the number of
+ * addresses it covers that are suitably aligned and supply enough room
+ * for the allocation.
+ */
+static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+                                        unsigned long size,
+                                        unsigned long align)
+{
+       u64 start, end;
+
+       if (md->type != EFI_CONVENTIONAL_MEMORY)
+               return 0;
+
+       start = round_up(md->phys_addr, align);
+       end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size,
+                        align);
+
+       if (start > end)
+               return 0;
+
+       return (end - start + 1) / align;
+}
+
+/*
+ * The UEFI memory descriptors have a virtual address field that is only used
+ * when installing the virtual mapping using SetVirtualAddressMap(). Since it
+ * is unused here, we can reuse it to keep track of each descriptor's slot
+ * count.
+ */
+#define MD_NUM_SLOTS(md)       ((md)->virt_addr)
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+                             unsigned long size,
+                             unsigned long align,
+                             unsigned long *addr,
+                             unsigned long random_seed)
+{
+       unsigned long map_size, desc_size, total_slots = 0, target_slot;
+       efi_status_t status;
+       efi_memory_desc_t *memory_map;
+       int map_offset;
+
+       status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
+                                   &desc_size, NULL, NULL);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       if (align < EFI_ALLOC_ALIGN)
+               align = EFI_ALLOC_ALIGN;
+
+       /* count the suitable slots in each memory map entry */
+       for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+               efi_memory_desc_t *md = (void *)memory_map + map_offset;
+               unsigned long slots;
+
+               slots = get_entry_num_slots(md, size, align);
+               MD_NUM_SLOTS(md) = slots;
+               total_slots += slots;
+       }
+
+       /* find a random number between 0 and total_slots */
+       target_slot = (total_slots * (u16)random_seed) >> 16;
+
+       /*
+        * target_slot is now a value in the range [0, total_slots), and so
+        * it corresponds with exactly one of the suitable slots we recorded
+        * when iterating over the memory map the first time around.
+        *
+        * So iterate over the memory map again, subtracting the number of
+        * slots of each entry at each iteration, until we have found the entry
+        * that covers our chosen slot. Use the residual value of target_slot
+        * to calculate the randomly chosen address, and allocate it directly
+        * using EFI_ALLOCATE_ADDRESS.
+        */
+       for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+               efi_memory_desc_t *md = (void *)memory_map + map_offset;
+               efi_physical_addr_t target;
+               unsigned long pages;
+
+               if (target_slot >= MD_NUM_SLOTS(md)) {
+                       target_slot -= MD_NUM_SLOTS(md);
+                       continue;
+               }
+
+               target = round_up(md->phys_addr, align) + target_slot * align;
+               pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+
+               status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+                                       EFI_LOADER_DATA, pages, &target);
+               if (status == EFI_SUCCESS)
+                       *addr = target;
+               break;
+       }
+
+       efi_call_early(free_pool, memory_map);
+
+       return status;
+}
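Only the low 16 bits of the seed feed the slot choice, so target_slot always
lands in [0, total_slots). A small userspace sketch of the same mapping, with
made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long total_slots = 1024;
		unsigned long random_seed = 0xdeadc000;	/* hypothetical seed */

		/* same arithmetic as efi_random_alloc() above */
		unsigned long target_slot =
			(total_slots * (uint16_t)random_seed) >> 16;

		printf("slot %lu of %lu\n", target_slot, total_slots);
		return 0;	/* prints: slot 768 of 1024 */
	}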
index 6c8921140f024c300bf34234de4c8545496953a5..130cb21140592bd995e9e5c1c1bd10e7156ef7e7 100644 (file)
@@ -4,11 +4,12 @@
 menuconfig CORESIGHT
        bool "CoreSight Tracing Support"
        select ARM_AMBA
+       select PERF_EVENTS
        help
          This framework provides a kernel interface for the CoreSight debug
          and trace drivers to register themselves with. It's intended to build
          a topological view of the CoreSight components based on a DT
-         specification and configure the right serie of components when a
+         specification and configure the right series of components when a
          trace source gets enabled.
 
 if CORESIGHT
@@ -77,4 +78,15 @@ config CORESIGHT_QCOM_REPLICATOR
          programmable ATB replicator sends the ATB trace stream from the
          ETB/ETF to the TPIUi and ETR.
 
+config CORESIGHT_STM
+       bool "CoreSight System Trace Macrocell driver"
+       depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
+       select CORESIGHT_LINKS_AND_SINKS
+       select STM
+       help
+         This driver provides support for hardware assisted software
+         instrumentation based tracing. This is primarily used for
+         logging useful software events or data coming from various entities
+         in the system, possibly running different OSs.
+
 endif
index 99f8e5f6256e25c438862c2f93b80f4e59eb0f64..af480d9c1441ae30d41269f5c9f1e7588acd14e3 100644 (file)
@@ -1,13 +1,18 @@
 #
 # Makefile for CoreSight drivers.
 #
-obj-$(CONFIG_CORESIGHT) += coresight.o
+obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o
 obj-$(CONFIG_OF) += of_coresight.o
-obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
+obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
+                                            coresight-tmc-etf.o \
+                                            coresight-tmc-etr.o
 obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
 obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
 obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
                                           coresight-replicator.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
+                                       coresight-etm3x-sysfs.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
+                                       coresight-etm4x-sysfs.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
+obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
index 77d0f9c1118dfdfcc29a2d0435f3311a12793414..4d20b0be0c0b6a337cdec7cbbfd17f8ce94e2a63 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Embedded Trace Buffer driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,8 +12,8 @@
  * GNU General Public License for more details.
  */
 
+#include <asm/local.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/coresight.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
+#include <linux/circ_buf.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+
+#include <asm/local.h>
 
 #include "coresight-priv.h"
 
  * @csdev:     component vitals needed by the framework.
  * @miscdev:   specifics to handle "/dev/xyz.etb" entry.
  * @spinlock:  only one at a time pls.
- * @in_use:    synchronise user space access to etb buffer.
+ * @reading:   synchronise user space access to etb buffer.
+ * @mode:      how this ETB is being used.
  * @buf:       area of memory where ETB buffer content gets sent.
  * @buffer_depth: size of @buf.
- * @enable:    this ETB is being used.
  * @trigger_cntr: amount of words to store after a trigger.
  */
 struct etb_drvdata {
@@ -84,10 +91,10 @@ struct etb_drvdata {
        struct coresight_device *csdev;
        struct miscdevice       miscdev;
        spinlock_t              spinlock;
-       atomic_t                in_use;
+       local_t                 reading;
+       local_t                 mode;
        u8                      *buf;
        u32                     buffer_depth;
-       bool                    enable;
        u32                     trigger_cntr;
 };
 
@@ -132,18 +139,31 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
        CS_LOCK(drvdata->base);
 }
 
-static int etb_enable(struct coresight_device *csdev)
+static int etb_enable(struct coresight_device *csdev, u32 mode)
 {
-       struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       u32 val;
        unsigned long flags;
+       struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
+       val = local_cmpxchg(&drvdata->mode,
+                           CS_MODE_DISABLED, mode);
+       /*
+        * When accessing from Perf, a HW buffer can be handled
+        * by a single trace entity.  In sysFS mode many tracers
+        * can be logging to the same HW buffer.
+        */
+       if (val == CS_MODE_PERF)
+               return -EBUSY;
+
+       /* Nothing to do, the tracer is already enabled. */
+       if (val == CS_MODE_SYSFS)
+               goto out;
 
        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_enable_hw(drvdata);
-       drvdata->enable = true;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+out:
        dev_info(drvdata->dev, "ETB enabled\n");
        return 0;
 }
@@ -244,17 +264,226 @@ static void etb_disable(struct coresight_device *csdev)
        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_disable_hw(drvdata);
        etb_dump_hw(drvdata);
-       drvdata->enable = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-       pm_runtime_put(drvdata->dev);
+       local_set(&drvdata->mode, CS_MODE_DISABLED);
 
        dev_info(drvdata->dev, "ETB disabled\n");
 }
 
+static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
+                             void **pages, int nr_pages, bool overwrite)
+{
+       int node;
+       struct cs_buffers *buf;
+
+       if (cpu == -1)
+               cpu = smp_processor_id();
+       node = cpu_to_node(cpu);
+
+       buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+       if (!buf)
+               return NULL;
+
+       buf->snapshot = overwrite;
+       buf->nr_pages = nr_pages;
+       buf->data_pages = pages;
+
+       return buf;
+}
+
+static void etb_free_buffer(void *config)
+{
+       struct cs_buffers *buf = config;
+
+       kfree(buf);
+}
+
+static int etb_set_buffer(struct coresight_device *csdev,
+                         struct perf_output_handle *handle,
+                         void *sink_config)
+{
+       int ret = 0;
+       unsigned long head;
+       struct cs_buffers *buf = sink_config;
+
+       /* wrap head around to the amount of space we have */
+       head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+
+       /* find the page to write to */
+       buf->cur = head / PAGE_SIZE;
+
+       /* and offset within that page */
+       buf->offset = head % PAGE_SIZE;
+
+       local_set(&buf->data_size, 0);
+
+       return ret;
+}
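The head-to-page bookkeeping above is plain modular arithmetic. For example,
with 4 KiB pages and an 8-page ring (illustrative numbers):

	/*
	 * handle->head = 0x12345
	 * head        = 0x12345 & (8 * 4096 - 1) = 0x2345   wrap into ring
	 * buf->cur    = 0x2345 / 4096 = 2                   third page
	 * buf->offset = 0x2345 % 4096 = 0x345               byte within page
	 */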
+
+static unsigned long etb_reset_buffer(struct coresight_device *csdev,
+                                     struct perf_output_handle *handle,
+                                     void *sink_config, bool *lost)
+{
+       unsigned long size = 0;
+       struct cs_buffers *buf = sink_config;
+
+       if (buf) {
+               /*
+                * In snapshot mode ->data_size holds the new address of the
+                * ring buffer's head.  The size itself is the whole address
+                * range since we want the latest information.
+                */
+               if (buf->snapshot)
+                       handle->head = local_xchg(&buf->data_size,
+                                                 buf->nr_pages << PAGE_SHIFT);
+
+               /*
+                * Tell the tracer PMU how much we got in this run and if
+                * something went wrong along the way.  Nobody else can use
+                * this cs_buffers instance until we are done.  As such
+                * resetting parameters here and squaring off with the ring
+                * buffer API in the tracer PMU is fine.
+                */
+               *lost = !!local_xchg(&buf->lost, 0);
+               size = local_xchg(&buf->data_size, 0);
+       }
+
+       return size;
+}
+
+static void etb_update_buffer(struct coresight_device *csdev,
+                             struct perf_output_handle *handle,
+                             void *sink_config)
+{
+       int i, cur;
+       u8 *buf_ptr;
+       u32 read_ptr, write_ptr, capacity;
+       u32 status, read_data, to_read;
+       unsigned long offset;
+       struct cs_buffers *buf = sink_config;
+       struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       if (!buf)
+               return;
+
+       capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+
+       CS_UNLOCK(drvdata->base);
+       etb_disable_hw(drvdata);
+
+       /* unit is in words, not bytes */
+       read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+       write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+
+       /*
+        * Entries should be aligned to the frame size.  If they are not
+        * go back to the last alignment point to give decoding tools a
+        * chance to fix things.
+        */
+       if (write_ptr % ETB_FRAME_SIZE_WORDS) {
+               dev_err(drvdata->dev,
+                       "write_ptr: %lu not aligned to formatter frame size\n",
+                       (unsigned long)write_ptr);
+
+               write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
+               local_inc(&buf->lost);
+       }
+
+       /*
+        * Get a hold of the status register and see if a wrap around
+        * has occurred.  If so adjust things accordingly.  Otherwise
+        * start at the beginning and go until the write pointer has
+        * been reached.
+        */
+       status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
+       if (status & ETB_STATUS_RAM_FULL) {
+               local_inc(&buf->lost);
+               to_read = capacity;
+               read_ptr = write_ptr;
+       } else {
+               to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
+               to_read *= ETB_FRAME_SIZE_WORDS;
+       }
+
+       /*
+        * Make sure we don't overwrite data that hasn't been consumed yet.
+        * It is entirely possible that the HW buffer has more data than the
+        * ring buffer can currently handle.  If so adjust the start address
+        * to take only the last traces.
+        *
+        * In snapshot mode we are looking to get the latest traces only and as
+        * such, we don't care about not overwriting data that hasn't been
+        * processed by user space.
+        */
+       if (!buf->snapshot && to_read > handle->size) {
+               u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
+
+               /* The new read pointer must be frame size aligned */
+               to_read = handle->size & mask;
+               /*
+                * Move the RAM read pointer up, keeping in mind that
+                * everything is in frame size units.
+                */
+               read_ptr = (write_ptr + drvdata->buffer_depth) -
+                                       to_read / ETB_FRAME_SIZE_WORDS;
+               /* Wrap around if need be */
+               if (read_ptr > (drvdata->buffer_depth - 1))
+                       read_ptr -= drvdata->buffer_depth;
+               /* let the decoder know we've skipped ahead */
+               local_inc(&buf->lost);
+       }
+
+       /* finally tell HW where we want to start reading from */
+       writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+       cur = buf->cur;
+       offset = buf->offset;
+       for (i = 0; i < to_read; i += 4) {
+               buf_ptr = buf->data_pages[cur] + offset;
+               read_data = readl_relaxed(drvdata->base +
+                                         ETB_RAM_READ_DATA_REG);
+               *buf_ptr++ = read_data >> 0;
+               *buf_ptr++ = read_data >> 8;
+               *buf_ptr++ = read_data >> 16;
+               *buf_ptr++ = read_data >> 24;
+
+               offset += 4;
+               if (offset >= PAGE_SIZE) {
+                       offset = 0;
+                       cur++;
+                       /* wrap around at the end of the buffer */
+                       cur &= buf->nr_pages - 1;
+               }
+       }
+
+       /* reset ETB buffer for next run */
+       writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+       writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+
+       /*
+        * In snapshot mode all we have to do is communicate to
+        * perf_aux_output_end() the address of the current head.  In full
+        * trace mode the same function expects a size to move rb->aux_head
+        * forward.
+        */
+       if (buf->snapshot)
+               local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
+       else
+               local_add(to_read, &buf->data_size);
+
+       etb_enable_hw(drvdata);
+       CS_LOCK(drvdata->base);
+}
+
 static const struct coresight_ops_sink etb_sink_ops = {
        .enable         = etb_enable,
        .disable        = etb_disable,
+       .alloc_buffer   = etb_alloc_buffer,
+       .free_buffer    = etb_free_buffer,
+       .set_buffer     = etb_set_buffer,
+       .reset_buffer   = etb_reset_buffer,
+       .update_buffer  = etb_update_buffer,
 };
 
 static const struct coresight_ops etb_cs_ops = {
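The hunk above drains the ETB RAM into the perf ring buffer.  The pointer arithmetic leans on CIRC_CNT(), whose definition is ((head) - (tail)) & ((size) - 1), so it only holds for power-of-two sizes.  A minimal sketch of the availability computation, not driver code, with kernel types assumed:

/*
 * Sketch only: number of RAM locations ready to drain.  A full RAM
 * (ETB_STATUS_RAM_FULL) means the write pointer lapped the read
 * pointer, so the whole RAM is valid; the driver then scales the
 * result by ETB_FRAME_SIZE_WORDS before its byte-wise copy loop.
 */
static u32 etb_ready_entries(u32 write_ptr, u32 read_ptr,
			     u32 buffer_depth, bool ram_full)
{
	if (ram_full)
		return buffer_depth;

	return (write_ptr - read_ptr) & (buffer_depth - 1);	/* CIRC_CNT */
}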
@@ -266,7 +495,7 @@ static void etb_dump(struct etb_drvdata *drvdata)
        unsigned long flags;
 
        spin_lock_irqsave(&drvdata->spinlock, flags);
-       if (drvdata->enable) {
+       if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                etb_disable_hw(drvdata);
                etb_dump_hw(drvdata);
                etb_enable_hw(drvdata);
@@ -281,7 +510,7 @@ static int etb_open(struct inode *inode, struct file *file)
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
 
-       if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+       if (local_cmpxchg(&drvdata->reading, 0, 1))
                return -EBUSY;
 
        dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -317,7 +546,7 @@ static int etb_release(struct inode *inode, struct file *file)
 {
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
-       atomic_set(&drvdata->in_use, 0);
+       local_set(&drvdata->reading, 0);
 
        dev_dbg(drvdata->dev, "%s: released\n", __func__);
        return 0;
@@ -331,47 +560,29 @@ static const struct file_operations etb_fops = {
        .llseek         = no_llseek,
 };
 
-static ssize_t status_show(struct device *dev,
-                          struct device_attribute *attr, char *buf)
-{
-       unsigned long flags;
-       u32 etb_rdr, etb_sr, etb_rrp, etb_rwp;
-       u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr;
-       struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       pm_runtime_get_sync(drvdata->dev);
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       CS_UNLOCK(drvdata->base);
-
-       etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
-       etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG);
-       etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
-       etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
-       etb_trg = readl_relaxed(drvdata->base + ETB_TRG);
-       etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG);
-       etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR);
-       etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
-
-       CS_LOCK(drvdata->base);
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
-       pm_runtime_put(drvdata->dev);
-
-       return sprintf(buf,
-                      "Depth:\t\t0x%x\n"
-                      "Status:\t\t0x%x\n"
-                      "RAM read ptr:\t0x%x\n"
-                      "RAM wrt ptr:\t0x%x\n"
-                      "Trigger cnt:\t0x%x\n"
-                      "Control:\t0x%x\n"
-                      "Flush status:\t0x%x\n"
-                      "Flush ctrl:\t0x%x\n",
-                      etb_rdr, etb_sr, etb_rrp, etb_rwp,
-                      etb_trg, etb_cr, etb_ffsr, etb_ffcr);
-
-       return -EINVAL;
-}
-static DEVICE_ATTR_RO(status);
+#define coresight_etb10_simple_func(name, offset)                       \
+       coresight_simple_func(struct etb_drvdata, name, offset)
+
+coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
+coresight_etb10_simple_func(sts, ETB_STATUS_REG);
+coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER);
+coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER);
+coresight_etb10_simple_func(trg, ETB_TRG);
+coresight_etb10_simple_func(ctl, ETB_CTL_REG);
+coresight_etb10_simple_func(ffsr, ETB_FFSR);
+coresight_etb10_simple_func(ffcr, ETB_FFCR);
+
+static struct attribute *coresight_etb_mgmt_attrs[] = {
+       &dev_attr_rdp.attr,
+       &dev_attr_sts.attr,
+       &dev_attr_rrp.attr,
+       &dev_attr_rwp.attr,
+       &dev_attr_trg.attr,
+       &dev_attr_ctl.attr,
+       &dev_attr_ffsr.attr,
+       &dev_attr_ffcr.attr,
+       NULL,
+};
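The status_show() boilerplate removed below is replaced by one generated show routine per register.  coresight_simple_func() lives in coresight-priv.h elsewhere in this series; roughly, each invocation expands to a single-register read plus a read-only device attribute, along the lines of this sketch (the exact format string is an assumption):

/* Approximate expansion of coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG) */
static ssize_t rdp_show(struct device *_dev,
			struct device_attribute *attr, char *buf)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(_dev->parent);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n",
			 readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG));
}
static DEVICE_ATTR_RO(rdp);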
 
 static ssize_t trigger_cntr_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
@@ -401,10 +612,23 @@ static DEVICE_ATTR_RW(trigger_cntr);
 
 static struct attribute *coresight_etb_attrs[] = {
        &dev_attr_trigger_cntr.attr,
-       &dev_attr_status.attr,
        NULL,
 };
-ATTRIBUTE_GROUPS(coresight_etb);
+
+static const struct attribute_group coresight_etb_group = {
+       .attrs = coresight_etb_attrs,
+};
+
+static const struct attribute_group coresight_etb_mgmt_group = {
+       .attrs = coresight_etb_mgmt_attrs,
+       .name = "mgmt",
+};
+
+const struct attribute_group *coresight_etb_groups[] = {
+       &coresight_etb_group,
+       &coresight_etb_mgmt_group,
+       NULL,
+};
 
 static int etb_probe(struct amba_device *adev, const struct amba_id *id)
 {
@@ -481,7 +705,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
        if (ret)
                goto err_misc_register;
 
-       dev_info(dev, "ETB initialized\n");
        return 0;
 
 err_misc_register:
@@ -489,15 +712,6 @@ err_misc_register:
        return ret;
 }
 
-static int etb_remove(struct amba_device *adev)
-{
-       struct etb_drvdata *drvdata = amba_get_drvdata(adev);
-
-       misc_deregister(&drvdata->miscdev);
-       coresight_unregister(drvdata->csdev);
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int etb_runtime_suspend(struct device *dev)
 {
@@ -537,14 +751,10 @@ static struct amba_driver etb_driver = {
                .name   = "coresight-etb10",
                .owner  = THIS_MODULE,
                .pm     = &etb_dev_pm_ops,
+               .suppress_bind_attrs = true,
 
        },
        .probe          = etb_probe,
-       .remove         = etb_remove,
        .id_table       = etb_ids,
 };
-
-module_amba_driver(etb_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
+builtin_amba_driver(etb_driver);
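Switching from module_amba_driver() to builtin_amba_driver() makes the driver built-in only: no exit path is generated, which is why remove() and the MODULE_* macros go away and .suppress_bind_attrs blocks manual unbinding through sysfs.  For reference, the helper is a thin registration-only wrapper, roughly:

/* include/linux/amba/bus.h, paraphrased: register at boot, never unregister */
#define builtin_amba_driver(__amba_drv) \
	builtin_driver(__amba_drv, amba_driver_register)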
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
new file mode 100644 (file)
index 0000000..755125f
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "coresight-priv.h"
+
+static struct pmu etm_pmu;
+static bool etm_perf_up;
+
+/**
+ * struct etm_event_data - Coresight specifics associated to an event
+ * @work:              Handle to free allocated memory outside IRQ context.
+ * @mask:              Hold the CPU(s) this event was set for.
+ * @snk_config:                The sink configuration.
+ * @path:              An array of path, each slot for one CPU.
+ */
+struct etm_event_data {
+       struct work_struct work;
+       cpumask_t mask;
+       void *snk_config;
+       struct list_head **path;
+};
+
+static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
+
+/* ETMv3.5/PTM's ETMCR is 'config' */
+PMU_FORMAT_ATTR(cycacc,                "config:" __stringify(ETM_OPT_CYCACC));
+PMU_FORMAT_ATTR(timestamp,     "config:" __stringify(ETM_OPT_TS));
+
+static struct attribute *etm_config_formats_attr[] = {
+       &format_attr_cycacc.attr,
+       &format_attr_timestamp.attr,
+       NULL,
+};
+
+static struct attribute_group etm_pmu_format_group = {
+       .name   = "format",
+       .attrs  = etm_config_formats_attr,
+};
+
+static const struct attribute_group *etm_pmu_attr_groups[] = {
+       &etm_pmu_format_group,
+       NULL,
+};
+
+static void etm_event_read(struct perf_event *event) {}
+
+static int etm_event_init(struct perf_event *event)
+{
+       if (event->attr.type != etm_pmu.type)
+               return -ENOENT;
+
+       return 0;
+}
+
+static void free_event_data(struct work_struct *work)
+{
+       int cpu;
+       cpumask_t *mask;
+       struct etm_event_data *event_data;
+       struct coresight_device *sink;
+
+       event_data = container_of(work, struct etm_event_data, work);
+       mask = &event_data->mask;
+       /*
+        * First deal with the sink configuration.  See comment in
+        * etm_setup_aux() about why we take the first available path.
+        */
+       if (event_data->snk_config) {
+               cpu = cpumask_first(mask);
+               sink = coresight_get_sink(event_data->path[cpu]);
+               if (sink_ops(sink)->free_buffer)
+                       sink_ops(sink)->free_buffer(event_data->snk_config);
+       }
+
+       for_each_cpu(cpu, mask) {
+               if (event_data->path[cpu])
+                       coresight_release_path(event_data->path[cpu]);
+       }
+
+       kfree(event_data->path);
+       kfree(event_data);
+}
+
+static void *alloc_event_data(int cpu)
+{
+       int size;
+       cpumask_t *mask;
+       struct etm_event_data *event_data;
+
+       /* First get memory for the session's data */
+       event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
+       if (!event_data)
+               return NULL;
+
+       /* Make sure nothing disappears under us */
+       get_online_cpus();
+       size = num_online_cpus();
+
+       mask = &event_data->mask;
+       if (cpu != -1)
+               cpumask_set_cpu(cpu, mask);
+       else
+               cpumask_copy(mask, cpu_online_mask);
+       put_online_cpus();
+
+       /*
+        * Each CPU has a single path between source and destination.  As such
+        * allocate an array using CPU numbers as indexes.  That way a path
+        * for any CPU can easily be accessed at any given time.  We proceed
+        * the same way for sessions involving a single CPU.  The cost of
+        * unused memory when dealing with single CPU trace scenarios is small
+        * compared to the cost of searching through an optimized array.
+        */
+       event_data->path = kcalloc(size,
+                                  sizeof(struct list_head *), GFP_KERNEL);
+       if (!event_data->path) {
+               kfree(event_data);
+               return NULL;
+       }
+
+       return event_data;
+}
+
+static void etm_free_aux(void *data)
+{
+       struct etm_event_data *event_data = data;
+
+       schedule_work(&event_data->work);
+}
+
+static void *etm_setup_aux(int event_cpu, void **pages,
+                          int nr_pages, bool overwrite)
+{
+       int cpu;
+       cpumask_t *mask;
+       struct coresight_device *sink;
+       struct etm_event_data *event_data = NULL;
+
+       event_data = alloc_event_data(event_cpu);
+       if (!event_data)
+               return NULL;
+
+       INIT_WORK(&event_data->work, free_event_data);
+
+       mask = &event_data->mask;
+
+       /* Setup the path for each CPU in a trace session */
+       for_each_cpu(cpu, mask) {
+               struct coresight_device *csdev;
+
+               csdev = per_cpu(csdev_src, cpu);
+               if (!csdev)
+                       goto err;
+
+               /*
+                * Building a path doesn't enable it; it simply builds a
+                * list of devices from source to sink that can be
+                * referenced later when the path is actually needed.
+                */
+               event_data->path[cpu] = coresight_build_path(csdev);
+               if (!event_data->path[cpu])
+                       goto err;
+       }
+
+       /*
+        * In theory nothing prevents tracers in a trace session from being
+        * associated with different sinks, nor having a sink per tracer.  But
+        * until we have HW with this kind of topology and a way to convey
+        * sink assignment from the perf cmd line we need to assume tracers
+        * in a trace session are using the same sink.  Therefore pick the sink
+        * found at the end of the first available path.
+        */
+       cpu = cpumask_first(mask);
+       /* Grab the sink at the end of the path */
+       sink = coresight_get_sink(event_data->path[cpu]);
+       if (!sink)
+               goto err;
+
+       if (!sink_ops(sink)->alloc_buffer)
+               goto err;
+
+       /* Get the AUX specific data from the sink buffer */
+       event_data->snk_config =
+                       sink_ops(sink)->alloc_buffer(sink, cpu, pages,
+                                                    nr_pages, overwrite);
+       if (!event_data->snk_config)
+               goto err;
+
+out:
+       return event_data;
+
+err:
+       etm_free_aux(event_data);
+       event_data = NULL;
+       goto out;
+}
+
+static void etm_event_start(struct perf_event *event, int flags)
+{
+       int cpu = smp_processor_id();
+       struct etm_event_data *event_data;
+       struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+       struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+
+       if (!csdev)
+               goto fail;
+
+       /*
+        * Deal with the ring buffer API and get a handle on the
+        * session's information.
+        */
+       event_data = perf_aux_output_begin(handle, event);
+       if (!event_data)
+               goto fail;
+
+       /* We need a sink, no need to continue without one */
+       sink = coresight_get_sink(event_data->path[cpu]);
+       if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
+               goto fail_end_stop;
+
+       /* Configure the sink */
+       if (sink_ops(sink)->set_buffer(sink, handle,
+                                      event_data->snk_config))
+               goto fail_end_stop;
+
+       /* Nothing will happen without a path */
+       if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+               goto fail_end_stop;
+
+       /* Tell the perf core the event is alive */
+       event->hw.state = 0;
+
+       /* Finally enable the tracer */
+       if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF))
+               goto fail_end_stop;
+
+out:
+       return;
+
+fail_end_stop:
+       perf_aux_output_end(handle, 0, true);
+fail:
+       event->hw.state = PERF_HES_STOPPED;
+       goto out;
+}
+
+static void etm_event_stop(struct perf_event *event, int mode)
+{
+       bool lost;
+       int cpu = smp_processor_id();
+       unsigned long size;
+       struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+       struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+       struct etm_event_data *event_data = perf_get_aux(handle);
+
+       if (event->hw.state == PERF_HES_STOPPED)
+               return;
+
+       if (!csdev)
+               return;
+
+       sink = coresight_get_sink(event_data->path[cpu]);
+       if (!sink)
+               return;
+
+       /* stop tracer */
+       source_ops(csdev)->disable(csdev);
+
+       /* tell the core */
+       event->hw.state = PERF_HES_STOPPED;
+
+       if (mode & PERF_EF_UPDATE) {
+               if (WARN_ON_ONCE(handle->event != event))
+                       return;
+
+               /* update trace information */
+               if (!sink_ops(sink)->update_buffer)
+                       return;
+
+               sink_ops(sink)->update_buffer(sink, handle,
+                                             event_data->snk_config);
+
+               if (!sink_ops(sink)->reset_buffer)
+                       return;
+
+               size = sink_ops(sink)->reset_buffer(sink, handle,
+                                                   event_data->snk_config,
+                                                   &lost);
+
+               perf_aux_output_end(handle, size, lost);
+       }
+
+       /* Disabling the path makes its elements available to other sessions */
+       coresight_disable_path(event_data->path[cpu]);
+}
+
+static int etm_event_add(struct perf_event *event, int mode)
+{
+       int ret = 0;
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (mode & PERF_EF_START) {
+               etm_event_start(event, 0);
+               if (hwc->state & PERF_HES_STOPPED)
+                       ret = -EINVAL;
+       } else {
+               hwc->state = PERF_HES_STOPPED;
+       }
+
+       return ret;
+}
+
+static void etm_event_del(struct perf_event *event, int mode)
+{
+       etm_event_stop(event, PERF_EF_UPDATE);
+}
+
+int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{
+       char entry[sizeof("cpu9999999")];
+       int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
+       struct device *pmu_dev = etm_pmu.dev;
+       struct device *cs_dev = &csdev->dev;
+
+       sprintf(entry, "cpu%d", cpu);
+
+       if (!etm_perf_up)
+               return -EPROBE_DEFER;
+
+       if (link) {
+               ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
+               if (ret)
+                       return ret;
+               per_cpu(csdev_src, cpu) = csdev;
+       } else {
+               sysfs_remove_link(&pmu_dev->kobj, entry);
+               per_cpu(csdev_src, cpu) = NULL;
+       }
+
+       return 0;
+}
+
+static int __init etm_perf_init(void)
+{
+       int ret;
+
+       etm_pmu.capabilities    = PERF_PMU_CAP_EXCLUSIVE;
+
+       etm_pmu.attr_groups     = etm_pmu_attr_groups;
+       etm_pmu.task_ctx_nr     = perf_sw_context;
+       etm_pmu.read            = etm_event_read;
+       etm_pmu.event_init      = etm_event_init;
+       etm_pmu.setup_aux       = etm_setup_aux;
+       etm_pmu.free_aux        = etm_free_aux;
+       etm_pmu.start           = etm_event_start;
+       etm_pmu.stop            = etm_event_stop;
+       etm_pmu.add             = etm_event_add;
+       etm_pmu.del             = etm_event_del;
+
+       ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
+       if (ret == 0)
+               etm_perf_up = true;
+
+       return ret;
+}
+device_initcall(etm_perf_init);
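The callbacks wired up in etm_perf_init() are driven by the perf core's AUX machinery.  A hedged userspace sketch of the other side of that contract; error handling is elided and the page counts are arbitrary:

#include <linux/perf_event.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

int etm_trace_fd(int cpu, pid_t pid)
{
	struct perf_event_attr attr = { .size = sizeof(attr), .disabled = 1 };
	struct perf_event_mmap_page *pc;
	size_t page = sysconf(_SC_PAGESIZE);
	FILE *f;
	int fd;

	/* dynamic PMU type allocated by perf_pmu_register() above */
	f = fopen("/sys/bus/event_source/devices/cs_etm/type", "r");
	fscanf(f, "%u", &attr.type);
	fclose(f);

	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);

	/* regular ring buffer: one metadata page + 2^n data pages */
	pc = mmap(NULL, (1 + 8) * page, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);

	/* AUX area: these are the pages etm_setup_aux() receives */
	pc->aux_offset = (1 + 8) * page;
	pc->aux_size   = 16 * page;
	mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE, MAP_SHARED,
	     fd, pc->aux_offset);

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* -> etm_event_start() */
	return fd;
}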
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
new file mode 100644 (file)
index 0000000..87f5a13
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CORESIGHT_ETM_PERF_H
+#define _CORESIGHT_ETM_PERF_H
+
+struct coresight_device;
+
+#ifdef CONFIG_CORESIGHT
+int etm_perf_symlink(struct coresight_device *csdev, bool link);
+
+#else
+static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{ return -EINVAL; }
+
+#endif /* CONFIG_CORESIGHT */
+
+#endif
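etm_perf_symlink() publishes a cpuN entry under the PMU's sysfs directory so the perf tool can map a CPU to its tracer.  A sketch of the intended call site, assuming the tracer probe paths in this series follow the same shape (the error label is hypothetical):

/* hedged sketch of a tracer probe tail */
ret = etm_perf_symlink(drvdata->csdev, true);
if (ret)	/* -EPROBE_DEFER until etm_perf_init() has run */
	goto err_unregister;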
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index b4481eb29304a1ddf4a2b5a86d9244daa3c05f14..51597cb2c08af69293c0b17d8698019fc8af949e 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef _CORESIGHT_CORESIGHT_ETM_H
 #define _CORESIGHT_CORESIGHT_ETM_H
 
+#include <asm/local.h>
 #include <linux/spinlock.h>
 #include "coresight-priv.h"
 
 #define ETM_MODE_STALL         BIT(2)
 #define ETM_MODE_TIMESTAMP     BIT(3)
 #define ETM_MODE_CTXID         BIT(4)
-#define ETM_MODE_ALL           0x1f
+#define ETM_MODE_ALL           (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
+                                ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+                                ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
+                                ETM_MODE_EXCL_USER)
 
 #define ETM_SQR_MASK           0x3
 #define ETM_TRACEID_MASK       0x3f
 #define ETM_DEFAULT_EVENT_VAL  (ETM_HARD_WIRE_RES_A    |       \
                                 ETM_ADD_COMP_0         |       \
                                 ETM_EVENT_NOT_A)
+
 /**
- * struct etm_drvdata - specifics associated to an ETM component
- * @base:      memory mapped base address for this component.
- * @dev:       the device entity associated to this component.
- * @atclk:     optional clock for the core parts of the ETM.
- * @csdev:     component vitals needed by the framework.
- * @spinlock:  only one at a time pls.
- * @cpu:       the cpu this component is affined to.
- * @port_size: port size as reported by ETMCR bit 4-6 and 21.
- * @arch:      ETM/PTM version number.
- * @use_cpu14: true if management registers need to be accessed via CP14.
- * @enable:    is this ETM/PTM currently tracing.
- * @sticky_enable: true if ETM base configuration has been done.
- * @boot_enable:true if we should start tracing at boot time.
- * @os_unlock: true if access to management registers is allowed.
- * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
- * @nr_cntr:   Number of counters as found in ETMCCR bit 13-15.
- * @nr_ext_inp:        Number of external input as found in ETMCCR bit 17-19.
- * @nr_ext_out:        Number of external output as found in ETMCCR bit 20-22.
- * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
- * @etmccr:    value of register ETMCCR.
- * @etmccer:   value of register ETMCCER.
- * @traceid:   value of the current ID for this component.
+ * struct etm_config - configuration information related to an ETM
  * @mode:      controls various modes supported by this ETM/PTM.
  * @ctrl:      used in conjunction with @mode.
  * @trigger_event: setting for register ETMTRIGGER.
  * @startstop_ctrl: setting for register ETMTSSCR.
  * @enable_event: setting for register ETMTEEVR.
  * @enable_ctrl1: setting for register ETMTECR1.
+ * @enable_ctrl2: setting for register ETMTECR2.
  * @fifofull_level: setting for register ETMFFLR.
  * @addr_idx:  index for the address comparator selection.
  * @addr_val:  value for address comparator register.
  * @ctxid_mask: mask applicable to all the context IDs.
  * @sync_freq: Synchronisation frequency.
  * @timestamp_event: Defines an event that requests the insertion
-                   of a timestamp into the trace stream.
+ *                  of a timestamp into the trace stream.
  */
-struct etm_drvdata {
-       void __iomem                    *base;
-       struct device                   *dev;
-       struct clk                      *atclk;
-       struct coresight_device         *csdev;
-       spinlock_t                      spinlock;
-       int                             cpu;
-       int                             port_size;
-       u8                              arch;
-       bool                            use_cp14;
-       bool                            enable;
-       bool                            sticky_enable;
-       bool                            boot_enable;
-       bool                            os_unlock;
-       u8                              nr_addr_cmp;
-       u8                              nr_cntr;
-       u8                              nr_ext_inp;
-       u8                              nr_ext_out;
-       u8                              nr_ctxid_cmp;
-       u32                             etmccr;
-       u32                             etmccer;
-       u32                             traceid;
+struct etm_config {
        u32                             mode;
        u32                             ctrl;
        u32                             trigger_event;
        u32                             startstop_ctrl;
        u32                             enable_event;
        u32                             enable_ctrl1;
+       u32                             enable_ctrl2;
        u32                             fifofull_level;
        u8                              addr_idx;
        u32                             addr_val[ETM_MAX_ADDR_CMP];
@@ -244,6 +209,56 @@ struct etm_drvdata {
        u32                             timestamp_event;
 };
 
+/**
+ * struct etm_drvdata - specifics associated to an ETM component
+ * @base:      memory mapped base address for this component.
+ * @dev:       the device entity associated to this component.
+ * @atclk:     optional clock for the core parts of the ETM.
+ * @csdev:     component vitals needed by the framework.
+ * @spinlock:  only one at a time pls.
+ * @cpu:       the cpu this component is affined to.
+ * @port_size: port size as reported by ETMCR bit 4-6 and 21.
+ * @arch:      ETM/PTM version number.
+ * @use_cp14:  true if management registers need to be accessed via CP14.
+ * @mode:      this tracer's mode, i.e. sysFS, Perf or disabled.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable: true if we should start tracing at boot time.
+ * @os_unlock: true if access to management registers is allowed.
+ * @nr_addr_cmp: Number of pairs of address comparators as found in ETMCCR.
+ * @nr_cntr:   Number of counters as found in ETMCCR bit 13-15.
+ * @nr_ext_inp: Number of external inputs as found in ETMCCR bit 17-19.
+ * @nr_ext_out: Number of external outputs as found in ETMCCR bit 20-22.
+ * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
+ * @etmccr:    value of register ETMCCR.
+ * @etmccer:   value of register ETMCCER.
+ * @traceid:   value of the current ID for this component.
+ * @config:    structure holding configuration parameters.
+ */
+struct etm_drvdata {
+       void __iomem                    *base;
+       struct device                   *dev;
+       struct clk                      *atclk;
+       struct coresight_device         *csdev;
+       spinlock_t                      spinlock;
+       int                             cpu;
+       int                             port_size;
+       u8                              arch;
+       bool                            use_cp14;
+       local_t                         mode;
+       bool                            sticky_enable;
+       bool                            boot_enable;
+       bool                            os_unlock;
+       u8                              nr_addr_cmp;
+       u8                              nr_cntr;
+       u8                              nr_ext_inp;
+       u8                              nr_ext_out;
+       u8                              nr_ctxid_cmp;
+       u32                             etmccr;
+       u32                             etmccer;
+       u32                             traceid;
+       struct etm_config               config;
+};
+
 enum etm_addr_type {
        ETM_ADDR_TYPE_NONE,
        ETM_ADDR_TYPE_SINGLE,
@@ -251,4 +266,39 @@ enum etm_addr_type {
        ETM_ADDR_TYPE_START,
        ETM_ADDR_TYPE_STOP,
 };
+
+static inline void etm_writel(struct etm_drvdata *drvdata,
+                             u32 val, u32 off)
+{
+       if (drvdata->use_cp14) {
+               if (etm_writel_cp14(off, val)) {
+                       dev_err(drvdata->dev,
+                               "invalid CP14 access to ETM reg: %#x", off);
+               }
+       } else {
+               writel_relaxed(val, drvdata->base + off);
+       }
+}
+
+static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
+{
+       u32 val;
+
+       if (drvdata->use_cp14) {
+               if (etm_readl_cp14(off, &val)) {
+                       dev_err(drvdata->dev,
+                               "invalid CP14 access to ETM reg: %#x", off);
+               }
+       } else {
+               val = readl_relaxed(drvdata->base + off);
+       }
+
+       return val;
+}
+
+extern const struct attribute_group *coresight_etm_groups[];
+int etm_get_trace_id(struct etm_drvdata *drvdata);
+void etm_set_default(struct etm_config *config);
+void etm_config_trace_mode(struct etm_config *config);
+struct etm_config *get_etm_config(struct etm_drvdata *drvdata);
 #endif
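The local_t mode field replaces the old enable flag so the sysFS and perf paths can claim a tracer atomically.  A sketch of the claiming pattern, assuming the CS_MODE_* values from coresight.h with CS_MODE_DISABLED equal to zero:

/* Sketch: claim the ETM for sysFS use, mirroring how local_cmpxchg()
 * is used elsewhere in this series (e.g. etb_open() above). */
static int etm_claim_sysfs(struct etm_drvdata *drvdata)
{
	if (local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED,
			  CS_MODE_SYSFS) != CS_MODE_DISABLED)
		return -EBUSY;	/* perf (or another user) owns the tracer */

	return 0;
}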
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
new file mode 100644 (file)
index 0000000..02d4b62
--- /dev/null
@@ -0,0 +1,1265 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm.h"
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_addr_cmp;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_cntr;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ctxid_cmp_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_ctxid_cmp;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ctxid_cmp);
+
+static ssize_t etmsr_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       unsigned long flags, val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       pm_runtime_get_sync(drvdata->dev);
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       CS_UNLOCK(drvdata->base);
+
+       val = etm_readl(drvdata, ETMSR);
+
+       CS_LOCK(drvdata->base);
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       pm_runtime_put(drvdata->dev);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(etmsr);
+
+static ssize_t reset_store(struct device *dev,
+                          struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       int i, ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val) {
+               spin_lock(&drvdata->spinlock);
+               memset(config, 0, sizeof(struct etm_config));
+               config->mode = ETM_MODE_EXCLUDE;
+               config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+               for (i = 0; i < drvdata->nr_addr_cmp; i++)
+                       config->addr_type[i] = ETM_ADDR_TYPE_NONE;
+
+               etm_set_default(config);
+               spin_unlock(&drvdata->spinlock);
+       }
+
+       return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->mode;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->mode = val & ETM_MODE_ALL;
+
+       if (config->mode & ETM_MODE_EXCLUDE)
+               config->enable_ctrl1 |= ETMTECR1_INC_EXC;
+       else
+               config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
+
+       if (config->mode & ETM_MODE_CYCACC)
+               config->ctrl |= ETMCR_CYC_ACC;
+       else
+               config->ctrl &= ~ETMCR_CYC_ACC;
+
+       if (config->mode & ETM_MODE_STALL) {
+               if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+                       dev_warn(drvdata->dev, "stall mode not supported\n");
+                       ret = -EINVAL;
+                       goto err_unlock;
+               }
+               config->ctrl |= ETMCR_STALL_MODE;
+       } else
+               config->ctrl &= ~ETMCR_STALL_MODE;
+
+       if (config->mode & ETM_MODE_TIMESTAMP) {
+               if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+                       dev_warn(drvdata->dev, "timestamp not supported\n");
+                       ret = -EINVAL;
+                       goto err_unlock;
+               }
+               config->ctrl |= ETMCR_TIMESTAMP_EN;
+       } else
+               config->ctrl &= ~ETMCR_TIMESTAMP_EN;
+
+       if (config->mode & ETM_MODE_CTXID)
+               config->ctrl |= ETMCR_CTXID_SIZE;
+       else
+               config->ctrl &= ~ETMCR_CTXID_SIZE;
+
+       if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+               etm_config_trace_mode(config);
+
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+
+err_unlock:
+       spin_unlock(&drvdata->spinlock);
+       return ret;
+}
+static DEVICE_ATTR_RW(mode);
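mode_store() above folds the user-supplied bitmask into ETMCR one feature at a time.  Assuming ETM_MODE_EXCLUDE is BIT(0) and ETM_MODE_CYCACC is BIT(1) (both sit just above the hunk that shows ETM_MODE_STALL), cycle-accurate tracing with timestamps in exclude mode comes out as:

/* 0x1 | 0x2 | 0x8 = 0xb -- the hex value to write into the 'mode' file */
u32 mode = ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | ETM_MODE_TIMESTAMP;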
+
+static ssize_t trigger_event_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->trigger_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_event_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->trigger_event = val & ETM_EVENT_MASK;
+
+       return size;
+}
+static DEVICE_ATTR_RW(trigger_event);
+
+static ssize_t enable_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->enable_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t enable_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->enable_event = val & ETM_EVENT_MASK;
+
+       return size;
+}
+static DEVICE_ATTR_RW(enable_event);
+
+static ssize_t fifofull_level_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->fifofull_level;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t fifofull_level_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->fifofull_level = val;
+
+       return size;
+}
+static DEVICE_ATTR_RW(fifofull_level);
+
+static ssize_t addr_idx_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->addr_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val >= drvdata->nr_addr_cmp)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->addr_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_single_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+
+       val = config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+
+       config->addr_val[idx] = val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val1, val2;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (idx % 2 != 0) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+       if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+             (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val1 = config->addr_val[idx];
+       val2 = config->addr_val[idx + 1];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val1, val2;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+               return -EINVAL;
+       /* Lower address comparator cannot have a higher address value */
+       if (val1 > val2)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (idx % 2 != 0) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+       if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+             (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = val1;
+       config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+       config->addr_val[idx + 1] = val2;
+       config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+       config->enable_ctrl1 |= (1 << (idx/2));
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_range);
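addr_range_store() enforces that a range occupies an even/odd comparator pair: comparators 2i and 2i+1 hold the bounds, and ETMTECR1 bit i arms the range.  The invariant in miniature, as a hypothetical helper rather than driver code:

/* Sketch of the pairing rule checked above. */
static void etm_set_addr_range(struct etm_config *config, int range,
			       u32 start, u32 end)
{
	int idx = range * 2;			/* ranges use even/odd pairs */

	config->addr_val[idx] = start;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = end;	/* start <= end */
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	config->enable_ctrl1 |= 1 << range;	/* arm range in ETMTECR1 */
}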
+
+static ssize_t addr_start_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val = config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_START;
+       config->startstop_ctrl |= (1 << idx);
+       config->enable_ctrl1 |= BIT(25);
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val = config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+       config->startstop_ctrl |= (1 << (idx + 16));
+       config->enable_ctrl1 |= ETMTECR1_START_STOP;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_acctype_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->addr_acctype[config->addr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_acctype_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->addr_acctype[config->addr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_acctype);
+
+static ssize_t cntr_idx_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->cntr_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val >= drvdata->nr_cntr)
+               return -EINVAL;
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->cntr_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntr_rld_val_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->cntr_rld_val[config->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_val_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->cntr_rld_val[config->cntr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_val);
+
+static ssize_t cntr_event_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->cntr_event[config->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_event_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_event);
+
+static ssize_t cntr_rld_event_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->cntr_rld_event[config->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_event_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_event);
+
+static ssize_t cntr_val_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       int i, ret = 0;
+       u32 val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       if (!local_read(&drvdata->mode)) {
+               spin_lock(&drvdata->spinlock);
+               for (i = 0; i < drvdata->nr_cntr; i++)
+                       ret += sprintf(buf + ret, "counter %d: %x\n",
+                                      i, config->cntr_val[i]);
+               spin_unlock(&drvdata->spinlock);
+               return ret;
+       }
+
+       for (i = 0; i < drvdata->nr_cntr; i++) {
+               val = etm_readl(drvdata, ETMCNTVRn(i));
+               ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
+       }
+
+       return ret;
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->cntr_val[config->cntr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t seq_12_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_12_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_12_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_12_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_12_event);
+
+static ssize_t seq_21_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_21_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_21_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_21_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_21_event);
+
+static ssize_t seq_23_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_23_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_23_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_23_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_23_event);
+
+static ssize_t seq_31_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_31_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_31_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_31_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_31_event);
+
+static ssize_t seq_32_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_32_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_32_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_32_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_32_event);
+
+static ssize_t seq_13_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_13_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_13_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_13_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_13_event);
+
+static ssize_t seq_curr_state_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       unsigned long val, flags;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       if (!local_read(&drvdata->mode)) {
+               val = config->seq_curr_state;
+               goto out;
+       }
+
+       pm_runtime_get_sync(drvdata->dev);
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       CS_UNLOCK(drvdata->base);
+       val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+       CS_LOCK(drvdata->base);
+
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       pm_runtime_put(drvdata->dev);
+out:
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_curr_state_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val > ETM_SEQ_STATE_MAX_VAL)
+               return -EINVAL;
+
+       config->seq_curr_state = val;
+
+       return size;
+}
+static DEVICE_ATTR_RW(seq_curr_state);
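Hardware-reading show() handlers such as seq_curr_state_show() above all follow the same bracketing discipline: wake the device, take the lock, unlock the CoreSight registers, read, then undo everything in reverse order. Condensed into one hypothetical helper (a sketch, not part of the patch):

	static u32 etm_read_live(struct etm_drvdata *drvdata, u32 off)
	{
		unsigned long flags;
		u32 val;

		pm_runtime_get_sync(drvdata->dev);
		spin_lock_irqsave(&drvdata->spinlock, flags);
		CS_UNLOCK(drvdata->base);
		val = etm_readl(drvdata, off);
		CS_LOCK(drvdata->base);
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		pm_runtime_put(drvdata->dev);

		return val;
	}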
+
+static ssize_t ctxid_idx_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->ctxid_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val >= drvdata->nr_ctxid_cmp)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->ctxid_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_pid_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->ctxid_vpid[config->ctxid_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_pid_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       int ret;
+       unsigned long vpid, pid;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &vpid);
+       if (ret)
+               return ret;
+
+       pid = coresight_vpid_to_pid(vpid);
+
+       spin_lock(&drvdata->spinlock);
+       config->ctxid_pid[config->ctxid_idx] = pid;
+       config->ctxid_vpid[config->ctxid_idx] = vpid;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_pid);
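ctxid_idx and ctxid_pid form a two-step protocol: select a comparator, then program it. A hypothetical userspace sketch (device name and PID are made up; the store() routines above parse hex):

	#include <stdio.h>

	int main(void)
	{
		const char *base = "/sys/bus/coresight/devices/etm0";
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path), "%s/ctxid_idx", base);
		f = fopen(path, "w");
		if (f) { fprintf(f, "0\n"); fclose(f); }	/* select comparator 0 */

		snprintf(path, sizeof(path), "%s/ctxid_pid", base);
		f = fopen(path, "w");
		if (f) { fprintf(f, "%x\n", 1234); fclose(f); }	/* match PID 1234 */

		return 0;
	}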
+
+static ssize_t ctxid_mask_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->ctxid_mask;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_mask_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->ctxid_mask = val;
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_mask);
+
+static ssize_t sync_freq_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->sync_freq;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t sync_freq_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->sync_freq = val & ETM_SYNC_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(sync_freq);
+
+static ssize_t timestamp_event_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->timestamp_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t timestamp_event_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->timestamp_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(timestamp_event);
+
+static ssize_t cpu_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       int val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->cpu;
+       return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+static DEVICE_ATTR_RO(cpu);
+
+static ssize_t traceid_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = etm_get_trace_id(drvdata);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->traceid = val & ETM_TRACEID_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+static struct attribute *coresight_etm_attrs[] = {
+       &dev_attr_nr_addr_cmp.attr,
+       &dev_attr_nr_cntr.attr,
+       &dev_attr_nr_ctxid_cmp.attr,
+       &dev_attr_etmsr.attr,
+       &dev_attr_reset.attr,
+       &dev_attr_mode.attr,
+       &dev_attr_trigger_event.attr,
+       &dev_attr_enable_event.attr,
+       &dev_attr_fifofull_level.attr,
+       &dev_attr_addr_idx.attr,
+       &dev_attr_addr_single.attr,
+       &dev_attr_addr_range.attr,
+       &dev_attr_addr_start.attr,
+       &dev_attr_addr_stop.attr,
+       &dev_attr_addr_acctype.attr,
+       &dev_attr_cntr_idx.attr,
+       &dev_attr_cntr_rld_val.attr,
+       &dev_attr_cntr_event.attr,
+       &dev_attr_cntr_rld_event.attr,
+       &dev_attr_cntr_val.attr,
+       &dev_attr_seq_12_event.attr,
+       &dev_attr_seq_21_event.attr,
+       &dev_attr_seq_23_event.attr,
+       &dev_attr_seq_31_event.attr,
+       &dev_attr_seq_32_event.attr,
+       &dev_attr_seq_13_event.attr,
+       &dev_attr_seq_curr_state.attr,
+       &dev_attr_ctxid_idx.attr,
+       &dev_attr_ctxid_pid.attr,
+       &dev_attr_ctxid_mask.attr,
+       &dev_attr_sync_freq.attr,
+       &dev_attr_timestamp_event.attr,
+       &dev_attr_traceid.attr,
+       &dev_attr_cpu.attr,
+       NULL,
+};
+
+#define coresight_etm3x_simple_func(name, offset)                      \
+       coresight_simple_func(struct etm_drvdata, name, offset)
+
+coresight_etm3x_simple_func(etmccr, ETMCCR);
+coresight_etm3x_simple_func(etmccer, ETMCCER);
+coresight_etm3x_simple_func(etmscr, ETMSCR);
+coresight_etm3x_simple_func(etmidr, ETMIDR);
+coresight_etm3x_simple_func(etmcr, ETMCR);
+coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR);
+coresight_etm3x_simple_func(etmteevr, ETMTEEVR);
+coresight_etm3x_simple_func(etmtssvr, ETMTSSCR);
+coresight_etm3x_simple_func(etmtecr1, ETMTECR1);
+coresight_etm3x_simple_func(etmtecr2, ETMTECR2);
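The mgmt attributes below are generated rather than hand-written: coresight_simple_func() (defined in coresight-priv.h alongside this series) stamps out one read-only show routine per register. Approximately what the (etmccr, ETMCCR) instance becomes:

	static ssize_t etmccr_show(struct device *_dev,
				   struct device_attribute *attr, char *buf)
	{
		struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);

		return scnprintf(buf, PAGE_SIZE, "0x%x\n",
				 readl_relaxed(drvdata->base + ETMCCR));
	}
	static DEVICE_ATTR_RO(etmccr);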
+
+static struct attribute *coresight_etm_mgmt_attrs[] = {
+       &dev_attr_etmccr.attr,
+       &dev_attr_etmccer.attr,
+       &dev_attr_etmscr.attr,
+       &dev_attr_etmidr.attr,
+       &dev_attr_etmcr.attr,
+       &dev_attr_etmtraceidr.attr,
+       &dev_attr_etmteevr.attr,
+       &dev_attr_etmtssvr.attr,
+       &dev_attr_etmtecr1.attr,
+       &dev_attr_etmtecr2.attr,
+       NULL,
+};
+
+static const struct attribute_group coresight_etm_group = {
+       .attrs = coresight_etm_attrs,
+};
+
+static const struct attribute_group coresight_etm_mgmt_group = {
+       .attrs = coresight_etm_mgmt_attrs,
+       .name = "mgmt",
+};
+
+const struct attribute_group *coresight_etm_groups[] = {
+       &coresight_etm_group,
+       &coresight_etm_mgmt_group,
+       NULL,
+};
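These groups do not create any files on their own; they are handed to the coresight core when the device is registered, and the named group lands in a mgmt/ subdirectory next to the unnamed one. A sketch of the hookup, assuming the coresight_desc field names of this era:

	static struct coresight_desc etm_desc_sketch = {
		.type	= CORESIGHT_DEV_TYPE_SOURCE,
		.groups	= coresight_etm_groups,
	};
	/* later, in probe: drvdata->csdev = coresight_register(&etm_desc_sketch); */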
index d630b7ece73521ccf8cd7b320ebd75ecc92eb1d3..d83ab82672e4e136ffb443d033c5c23ccba697f5 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Program Flow Trace driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
 #include <linux/amba/bus.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/clk.h>
+#include <linux/perf_event.h>
 #include <asm/sections.h>
 
 #include "coresight-etm.h"
+#include "coresight-etm-perf.h"
 
+/*
+ * Not really modular but using module_param is the easiest way to
+ * remain consistent with existing use cases for now.
+ */
 static int boot_enable;
 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
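Because the driver is built in, boot_enable is set from the kernel command line rather than at insmod time, e.g. coresight_etm3x.boot_enable=1; S_IRUGO leaves the value readable under /sys/module/ afterwards.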
 
@@ -42,45 +51,16 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 static int etm_count;
 static struct etm_drvdata *etmdrvdata[NR_CPUS];
 
-static inline void etm_writel(struct etm_drvdata *drvdata,
-                             u32 val, u32 off)
-{
-       if (drvdata->use_cp14) {
-               if (etm_writel_cp14(off, val)) {
-                       dev_err(drvdata->dev,
-                               "invalid CP14 access to ETM reg: %#x", off);
-               }
-       } else {
-               writel_relaxed(val, drvdata->base + off);
-       }
-}
-
-static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
-{
-       u32 val;
-
-       if (drvdata->use_cp14) {
-               if (etm_readl_cp14(off, &val)) {
-                       dev_err(drvdata->dev,
-                               "invalid CP14 access to ETM reg: %#x", off);
-               }
-       } else {
-               val = readl_relaxed(drvdata->base + off);
-       }
-
-       return val;
-}
-
 /*
  * Memory mapped writes to clear os lock are not supported on some processors
  * and OS lock must be unlocked before any memory mapped access on such
  * processors, otherwise memory mapped reads/writes will be invalid.
  */
-static void etm_os_unlock(void *info)
+static void etm_os_unlock(struct etm_drvdata *drvdata)
 {
-       struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
        /* Writing any value to ETMOSLAR unlocks the trace registers */
        etm_writel(drvdata, 0x0, ETMOSLAR);
+       drvdata->os_unlock = true;
        isb();
 }
 
@@ -215,1431 +195,450 @@ static void etm_clr_prog(struct etm_drvdata *drvdata)
        }
 }
 
-static void etm_set_default(struct etm_drvdata *drvdata)
-{
-       int i;
-
-       drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->enable_event = ETM_HARD_WIRE_RES_A;
-
-       drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
-
-       for (i = 0; i < drvdata->nr_cntr; i++) {
-               drvdata->cntr_rld_val[i] = 0x0;
-               drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
-               drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
-               drvdata->cntr_val[i] = 0x0;
-       }
-
-       drvdata->seq_curr_state = 0x0;
-       drvdata->ctxid_idx = 0x0;
-       for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
-               drvdata->ctxid_pid[i] = 0x0;
-               drvdata->ctxid_vpid[i] = 0x0;
-       }
-
-       drvdata->ctxid_mask = 0x0;
-}
-
-static void etm_enable_hw(void *info)
+void etm_set_default(struct etm_config *config)
 {
        int i;
-       u32 etmcr;
-       struct etm_drvdata *drvdata = info;
 
-       CS_UNLOCK(drvdata->base);
-
-       /* Turn engine on */
-       etm_clr_pwrdwn(drvdata);
-       /* Apply power to trace registers */
-       etm_set_pwrup(drvdata);
-       /* Make sure all registers are accessible */
-       etm_os_unlock(drvdata);
-
-       etm_set_prog(drvdata);
-
-       etmcr = etm_readl(drvdata, ETMCR);
-       etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
-       etmcr |= drvdata->port_size;
-       etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
-       etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
-       etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
-       etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
-       etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
-       etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
-       for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-               etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
-               etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
-       }
-       for (i = 0; i < drvdata->nr_cntr; i++) {
-               etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
-               etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
-               etm_writel(drvdata, drvdata->cntr_rld_event[i],
-                          ETMCNTRLDEVRn(i));
-               etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
-       }
-       etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
-       etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
-       etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
-       etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
-       etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
-       etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
-       etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
-       for (i = 0; i < drvdata->nr_ext_out; i++)
-               etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
-       for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
-               etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
-       etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
-       etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
-       /* No external input selected */
-       etm_writel(drvdata, 0x0, ETMEXTINSELR);
-       etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
-       /* No auxiliary control selected */
-       etm_writel(drvdata, 0x0, ETMAUXCR);
-       etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
-       /* No VMID comparator value selected */
-       etm_writel(drvdata, 0x0, ETMVMIDCVR);
-
-       /* Ensures trace output is enabled from this ETM */
-       etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
-
-       etm_clr_prog(drvdata);
-       CS_LOCK(drvdata->base);
-
-       dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
-}
-
-static int etm_trace_id(struct coresight_device *csdev)
-{
-       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-       unsigned long flags;
-       int trace_id = -1;
-
-       if (!drvdata->enable)
-               return drvdata->traceid;
-       pm_runtime_get_sync(csdev->dev.parent);
-
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-
-       CS_UNLOCK(drvdata->base);
-       trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
-       CS_LOCK(drvdata->base);
-
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(csdev->dev.parent);
-
-       return trace_id;
-}
-
-static int etm_enable(struct coresight_device *csdev)
-{
-       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-       int ret;
-
-       pm_runtime_get_sync(csdev->dev.parent);
-       spin_lock(&drvdata->spinlock);
+       if (WARN_ON_ONCE(!config))
+               return;
 
        /*
-        * Configure the ETM only if the CPU is online.  If it isn't online
-        * hw configuration will take place when 'CPU_STARTING' is received
-        * in @etm_cpu_callback.
+        * Taken verbatim from the TRM:
+        *
+        * To trace all memory:
+        *  set bit [24] in register 0x009, the ETMTECR1, to 1
+        *  set all other bits in register 0x009, the ETMTECR1, to 0
+        *  set all bits in register 0x007, the ETMTECR2, to 0
+        *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
         */
-       if (cpu_online(drvdata->cpu)) {
-               ret = smp_call_function_single(drvdata->cpu,
-                                              etm_enable_hw, drvdata, 1);
-               if (ret)
-                       goto err;
-       }
-
-       drvdata->enable = true;
-       drvdata->sticky_enable = true;
+       config->enable_ctrl1 = BIT(24);
+       config->enable_ctrl2 = 0x0;
+       config->enable_event = ETM_HARD_WIRE_RES_A;
 
-       spin_unlock(&drvdata->spinlock);
-
-       dev_info(drvdata->dev, "ETM tracing enabled\n");
-       return 0;
-err:
-       spin_unlock(&drvdata->spinlock);
-       pm_runtime_put(csdev->dev.parent);
-       return ret;
-}
+       config->trigger_event = ETM_DEFAULT_EVENT_VAL;
 
-static void etm_disable_hw(void *info)
-{
-       int i;
-       struct etm_drvdata *drvdata = info;
-
-       CS_UNLOCK(drvdata->base);
-       etm_set_prog(drvdata);
-
-       /* Program trace enable to low by using always false event */
-       etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
-
-       /* Read back sequencer and counters for post trace analysis */
-       drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
-
-       for (i = 0; i < drvdata->nr_cntr; i++)
-               drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
-
-       etm_set_pwrdwn(drvdata);
-       CS_LOCK(drvdata->base);
-
-       dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
-}
-
-static void etm_disable(struct coresight_device *csdev)
-{
-       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
-       /*
-        * Taking hotplug lock here protects from clocks getting disabled
-        * with tracing being left on (crash scenario) if user disable occurs
-        * after cpu online mask indicates the cpu is offline but before the
-        * DYING hotplug callback is serviced by the ETM driver.
-        */
-       get_online_cpus();
-       spin_lock(&drvdata->spinlock);
-
-       /*
-        * Executing etm_disable_hw on the cpu whose ETM is being disabled
-        * ensures that register writes occur when cpu is powered.
-        */
-       smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
-       drvdata->enable = false;
-
-       spin_unlock(&drvdata->spinlock);
-       put_online_cpus();
-       pm_runtime_put(csdev->dev.parent);
-
-       dev_info(drvdata->dev, "ETM tracing disabled\n");
-}
-
-static const struct coresight_ops_source etm_source_ops = {
-       .trace_id       = etm_trace_id,
-       .enable         = etm_enable,
-       .disable        = etm_disable,
-};
-
-static const struct coresight_ops etm_cs_ops = {
-       .source_ops     = &etm_source_ops,
-};
-
-static ssize_t nr_addr_cmp_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_addr_cmp;
-       return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_addr_cmp);
-
-static ssize_t nr_cntr_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{      unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_cntr;
-       return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_cntr);
-
-static ssize_t nr_ctxid_cmp_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_ctxid_cmp;
-       return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ctxid_cmp);
-
-static ssize_t etmsr_show(struct device *dev,
-                         struct device_attribute *attr, char *buf)
-{
-       unsigned long flags, val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       pm_runtime_get_sync(drvdata->dev);
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       CS_UNLOCK(drvdata->base);
-
-       val = etm_readl(drvdata, ETMSR);
-
-       CS_LOCK(drvdata->base);
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(drvdata->dev);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(etmsr);
-
-static ssize_t reset_store(struct device *dev,
-                          struct device_attribute *attr,
-                          const char *buf, size_t size)
-{
-       int i, ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       if (val) {
-               spin_lock(&drvdata->spinlock);
-               drvdata->mode = ETM_MODE_EXCLUDE;
-               drvdata->ctrl = 0x0;
-               drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
-               drvdata->startstop_ctrl = 0x0;
-               drvdata->addr_idx = 0x0;
-               for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-                       drvdata->addr_val[i] = 0x0;
-                       drvdata->addr_acctype[i] = 0x0;
-                       drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
-               }
-               drvdata->cntr_idx = 0x0;
-
-               etm_set_default(drvdata);
-               spin_unlock(&drvdata->spinlock);
-       }
-
-       return size;
-}
-static DEVICE_ATTR_WO(reset);
-
-static ssize_t mode_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->mode;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t mode_store(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->mode = val & ETM_MODE_ALL;
-
-       if (drvdata->mode & ETM_MODE_EXCLUDE)
-               drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
-       else
-               drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
-
-       if (drvdata->mode & ETM_MODE_CYCACC)
-               drvdata->ctrl |= ETMCR_CYC_ACC;
-       else
-               drvdata->ctrl &= ~ETMCR_CYC_ACC;
-
-       if (drvdata->mode & ETM_MODE_STALL) {
-               if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
-                       dev_warn(drvdata->dev, "stall mode not supported\n");
-                       ret = -EINVAL;
-                       goto err_unlock;
-               }
-               drvdata->ctrl |= ETMCR_STALL_MODE;
-        } else
-               drvdata->ctrl &= ~ETMCR_STALL_MODE;
-
-       if (drvdata->mode & ETM_MODE_TIMESTAMP) {
-               if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
-                       dev_warn(drvdata->dev, "timestamp not supported\n");
-                       ret = -EINVAL;
-                       goto err_unlock;
-               }
-               drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
-       } else
-               drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
-
-       if (drvdata->mode & ETM_MODE_CTXID)
-               drvdata->ctrl |= ETMCR_CTXID_SIZE;
-       else
-               drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-
-err_unlock:
-       spin_unlock(&drvdata->spinlock);
-       return ret;
-}
-static DEVICE_ATTR_RW(mode);
-
-static ssize_t trigger_event_show(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->trigger_event;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t trigger_event_store(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       drvdata->trigger_event = val & ETM_EVENT_MASK;
-
-       return size;
-}
-static DEVICE_ATTR_RW(trigger_event);
-
-static ssize_t enable_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->enable_event;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t enable_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       drvdata->enable_event = val & ETM_EVENT_MASK;
-
-       return size;
-}
-static DEVICE_ATTR_RW(enable_event);
-
-static ssize_t fifofull_level_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->fifofull_level;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t fifofull_level_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       drvdata->fifofull_level = val;
-
-       return size;
-}
-static DEVICE_ATTR_RW(fifofull_level);
-
-static ssize_t addr_idx_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->addr_idx;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_idx_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       if (val >= drvdata->nr_addr_cmp)
-               return -EINVAL;
-
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->addr_idx = val;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_idx);
-
-static ssize_t addr_single_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EINVAL;
-       }
-
-       val = drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_single_store(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf, size_t size)
-{
-       u8 idx;
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EINVAL;
-       }
-
-       drvdata->addr_val[idx] = val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_single);
-
-static ssize_t addr_range_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       u8 idx;
-       unsigned long val1, val2;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (idx % 2 != 0) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-       if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-             (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val1 = drvdata->addr_val[idx];
-       val2 = drvdata->addr_val[idx + 1];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx %#lx\n", val1, val2);
-}
-
-static ssize_t addr_range_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val1, val2;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
-               return -EINVAL;
-       /* Lower address comparator cannot have a higher address value */
-       if (val1 > val2)
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (idx % 2 != 0) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-       if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-             (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = val1;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
-       drvdata->addr_val[idx + 1] = val2;
-       drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
-       drvdata->enable_ctrl1 |= (1 << (idx/2));
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_range);
-
-static ssize_t addr_start_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val = drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_start_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       u8 idx;
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
-       drvdata->startstop_ctrl |= (1 << idx);
-       drvdata->enable_ctrl1 |= BIT(25);
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_start);
-
-static ssize_t addr_stop_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val = drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_stop_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       u8 idx;
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
-       drvdata->startstop_ctrl |= (1 << (idx + 16));
-       drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_stop);
-
-static ssize_t addr_acctype_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val = drvdata->addr_acctype[drvdata->addr_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_acctype_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->addr_acctype[drvdata->addr_idx] = val;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_acctype);
-
-static ssize_t cntr_idx_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->cntr_idx;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_idx_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       if (val >= drvdata->nr_cntr)
-               return -EINVAL;
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_idx = val;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_idx);
-
-static ssize_t cntr_rld_val_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val = drvdata->cntr_rld_val[drvdata->cntr_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_val_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_val);
-
-static ssize_t cntr_event_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val = drvdata->cntr_event[drvdata->cntr_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_event_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_event);
-
-static ssize_t cntr_rld_event_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val = drvdata->cntr_rld_event[drvdata->cntr_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_event_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_event);
-
-static ssize_t cntr_val_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       int i, ret = 0;
-       u32 val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (!drvdata->enable) {
-               spin_lock(&drvdata->spinlock);
-               for (i = 0; i < drvdata->nr_cntr; i++)
-                       ret += sprintf(buf, "counter %d: %x\n",
-                                      i, drvdata->cntr_val[i]);
-               spin_unlock(&drvdata->spinlock);
-               return ret;
-       }
-
-       for (i = 0; i < drvdata->nr_cntr; i++) {
-               val = etm_readl(drvdata, ETMCNTVRn(i));
-               ret += sprintf(buf, "counter %d: %x\n", i, val);
-       }
-
-       return ret;
-}
-
-static ssize_t cntr_val_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
+       config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       for (i = 0; i < ETM_MAX_CNTR; i++) {
+               config->cntr_rld_val[i] = 0x0;
+               config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
+               config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
+               config->cntr_val[i] = 0x0;
+       }
 
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_val[drvdata->cntr_idx] = val;
-       spin_unlock(&drvdata->spinlock);
+       config->seq_curr_state = 0x0;
+       config->ctxid_idx = 0x0;
+       for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+               config->ctxid_pid[i] = 0x0;
+               config->ctxid_vpid[i] = 0x0;
+       }
 
-       return size;
+       config->ctxid_mask = 0x0;
 }
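Note the loops are now bounded by the architectural maxima rather than the probed drvdata->nr_cntr: etm_set_default() must work on a bare struct etm_config with no drvdata at hand. For reference, the bounds (values assumed to match coresight-etm.h, where ETMv3 caps these resources):

	#define ETM_MAX_CNTR		4	/* maximum number of counters */
	#define ETM_MAX_CTXID_CMP	3	/* maximum context ID comparators */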
-static DEVICE_ATTR_RW(cntr_val);
 
-static ssize_t seq_12_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+void etm_config_trace_mode(struct etm_config *config)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       u32 flags, mode;
 
-       val = drvdata->seq_12_event;
-       return sprintf(buf, "%#lx\n", val);
-}
+       mode = config->mode;
 
-static ssize_t seq_12_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       /* excluding kernel AND user space doesn't make sense */
+       if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+               return;
 
-       drvdata->seq_12_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_12_event);
+       /* nothing to do if neither flag is set */
+       if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+               return;
 
-static ssize_t seq_21_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       flags = (1 << 0 |       /* instruction execute */
+                3 << 3 |       /* ARM instruction */
+                0 << 5 |       /* No data value comparison */
+                0 << 7 |       /* No exact match */
+                0 << 8);       /* Ignore context ID */
 
-       val = drvdata->seq_21_event;
-       return sprintf(buf, "%#lx\n", val);
-}
+       /* No need to worry about single address comparators. */
+       config->enable_ctrl2 = 0x0;
 
-static ssize_t seq_21_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Bit 0 is address range comparator 1 */
+       config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       /*
+        * On ETMv3.5:
+        * ETMACTRn[13,11] == Non-secure state comparison control
+        * ETMACTRn[12,10] == Secure state comparison control
+        *
+        * b00 == Match in all modes in this state
+        * b01 == Do not match in any mode in this state
+        * b10 == Match in all modes except user mode in this state
+        * b11 == Match only in user mode in this state
+        */
 
-       drvdata->seq_21_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_21_event);
+       /* Tracing in secure mode is not supported at this time */
+       flags |= (0 << 12 | 1 << 10);
 
-static ssize_t seq_23_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       if (mode & ETM_MODE_EXCL_USER) {
+               /* exclude user, match all modes except user mode */
+               flags |= (1 << 13 | 0 << 11);
+       } else {
+               /* exclude kernel, match only in user mode */
+               flags |= (1 << 13 | 1 << 11);
+       }
 
-       val = drvdata->seq_23_event;
-       return sprintf(buf, "%#lx\n", val);
+       /*
+        * The ETMTEEVR register is already set to "hard wire A".  As such
+        * all that is left to do is set up an address comparator that spans
+        * the entire address range and configure the state and mode bits.
+        */
+       config->addr_val[0] = (u32) 0x0;
+       config->addr_val[1] = (u32) ~0x0;
+       config->addr_acctype[0] = flags;
+       config->addr_acctype[1] = flags;
+       config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+       config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 }
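For concreteness, the only two reachable outcomes of the flag arithmetic above (derived purely from the shifts in this function):

	/*
	 * base:                 1 << 0 | 3 << 3           = 0x0019
	 * never trace secure:   |= 1 << 10                -> 0x0419
	 * ETM_MODE_EXCL_USER:   |= 1 << 13                -> 0x2419
	 * ETM_MODE_EXCL_KERN:   |= 1 << 13 | 1 << 11      -> 0x2c19
	 */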
 
-static ssize_t seq_23_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+
+static int etm_parse_event_config(struct etm_drvdata *drvdata,
+                                 struct perf_event_attr *attr)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       if (!attr)
+               return -EINVAL;
 
-       drvdata->seq_23_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_23_event);
+       /* Clear configuration from previous run */
+       memset(config, 0, sizeof(struct etm_config));
 
-static ssize_t seq_31_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       if (attr->exclude_kernel)
+               config->mode = ETM_MODE_EXCL_KERN;
 
-       val = drvdata->seq_31_event;
-       return sprintf(buf, "%#lx\n", val);
-}
+       if (attr->exclude_user)
+               config->mode = ETM_MODE_EXCL_USER;
 
-static ssize_t seq_31_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Always start from the default config */
+       etm_set_default(config);
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       /*
+        * By default the tracers are configured to trace the whole address
+        * range.  Narrow the field only if requested by user space.
+        */
+       if (config->mode)
+               etm_config_trace_mode(config);
 
-       drvdata->seq_31_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_31_event);
+       /*
+        * At this time only cycle accurate and timestamp options are
+        * available.
+        */
+       if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
+               return -EINVAL;
 
-static ssize_t seq_32_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       config->ctrl = attr->config;
 
-       val = drvdata->seq_32_event;
-       return sprintf(buf, "%#lx\n", val);
+       return 0;
 }
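A minimal sketch of a caller, assuming only what this function checks (the real caller is the coresight perf glue; the values are illustrative):

	static int etm_parse_example(struct etm_drvdata *drvdata)
	{
		struct perf_event_attr attr = {
			.exclude_kernel	= 1,			/* -> ETM_MODE_EXCL_KERN */
			.config		= ETMCR_CYC_ACC,	/* cycle-accurate tracing */
		};

		return etm_parse_event_config(drvdata, &attr);
	}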
 
-static ssize_t seq_32_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
+static void etm_enable_hw(void *info)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       int i;
+       u32 etmcr;
+       struct etm_drvdata *drvdata = info;
+       struct etm_config *config = &drvdata->config;
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       CS_UNLOCK(drvdata->base);
 
-       drvdata->seq_32_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_32_event);
+       /* Turn engine on */
+       etm_clr_pwrdwn(drvdata);
+       /* Apply power to trace registers */
+       etm_set_pwrup(drvdata);
+       /* Make sure all registers are accessible */
+       etm_os_unlock(drvdata);
 
-static ssize_t seq_13_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       etm_set_prog(drvdata);
+
+       etmcr = etm_readl(drvdata, ETMCR);
+       /* Clear settings from a previous run if need be */
+       etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
+       etmcr |= drvdata->port_size;
+       etmcr |= ETMCR_ETM_EN;
+       etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
+       etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
+       etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
+       etm_writel(drvdata, config->enable_event, ETMTEEVR);
+       etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
+       etm_writel(drvdata, config->fifofull_level, ETMFFLR);
+       for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+               etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
+               etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
+       }
+       for (i = 0; i < drvdata->nr_cntr; i++) {
+               etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
+               etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
+               etm_writel(drvdata, config->cntr_rld_event[i],
+                          ETMCNTRLDEVRn(i));
+               etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
+       }
+       etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
+       etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
+       etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
+       etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
+       etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
+       etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
+       etm_writel(drvdata, config->seq_curr_state, ETMSQR);
+       for (i = 0; i < drvdata->nr_ext_out; i++)
+               etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
+       for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+               etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
+       etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
+       etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
+       /* No external input selected */
+       etm_writel(drvdata, 0x0, ETMEXTINSELR);
+       etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
+       /* No auxiliary control selected */
+       etm_writel(drvdata, 0x0, ETMAUXCR);
+       etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
+       /* No VMID comparator value selected */
+       etm_writel(drvdata, 0x0, ETMVMIDCVR);
+
+       etm_clr_prog(drvdata);
+       CS_LOCK(drvdata->base);
 
-       val = drvdata->seq_13_event;
-       return sprintf(buf, "%#lx\n", val);
+       dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 }
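
For reference, the CS_UNLOCK()/CS_LOCK() pair bracketing the programming sequence above amounts to the CoreSight lock-access handshake; a sketch under that assumption (the example_* helpers are hypothetical, the key and Lock Access Register offset are architected):

        /* Lock Access Register: accepts an architected key to unlock */
        #define EXAMPLE_LAR             0xfb0
        #define EXAMPLE_UNLOCK_KEY      0xc5acce55

        static inline void example_cs_unlock(void __iomem *base)
        {
                writel_relaxed(EXAMPLE_UNLOCK_KEY, base + EXAMPLE_LAR);
        }

        static inline void example_cs_lock(void __iomem *base)
        {
                /* Any value other than the key locks the component again */
                writel_relaxed(0x0, base + EXAMPLE_LAR);
        }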
 
-static ssize_t seq_13_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
+static int etm_cpu_id(struct coresight_device *csdev)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       drvdata->seq_13_event = val & ETM_EVENT_MASK;
-       return size;
+       return drvdata->cpu;
 }
-static DEVICE_ATTR_RW(seq_13_event);
 
-static ssize_t seq_curr_state_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
+int etm_get_trace_id(struct etm_drvdata *drvdata)
 {
-       unsigned long val, flags;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long flags;
+       int trace_id = -1;
 
-       if (!drvdata->enable) {
-               val = drvdata->seq_curr_state;
+       if (!drvdata)
                goto out;
-       }
+
+       if (!local_read(&drvdata->mode))
+               return drvdata->traceid;
 
        pm_runtime_get_sync(drvdata->dev);
+
        spin_lock_irqsave(&drvdata->spinlock, flags);
 
        CS_UNLOCK(drvdata->base);
-       val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+       trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
        CS_LOCK(drvdata->base);
 
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        pm_runtime_put(drvdata->dev);
-out:
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_curr_state_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       if (val > ETM_SEQ_STATE_MAX_VAL)
-               return -EINVAL;
 
-       drvdata->seq_curr_state = val;
+out:
+       return trace_id;
 
-       return size;
 }
-static DEVICE_ATTR_RW(seq_curr_state);
 
-static ssize_t ctxid_idx_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static int etm_trace_id(struct coresight_device *csdev)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       val = drvdata->ctxid_idx;
-       return sprintf(buf, "%#lx\n", val);
+       return etm_get_trace_id(drvdata);
 }
 
-static ssize_t ctxid_idx_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
+static int etm_enable_perf(struct coresight_device *csdev,
+                          struct perf_event_attr *attr)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       int ret;
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       if (val >= drvdata->nr_ctxid_cmp)
+       if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
                return -EINVAL;
 
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->ctxid_idx = val;
-       spin_unlock(&drvdata->spinlock);
+       /* Configure the tracer based on the session's specifics */
+       ret = etm_parse_event_config(drvdata, attr);
+       if (ret)
+               return ret;
+       /* And enable it */
+       etm_enable_hw(drvdata);
 
-       return size;
+       return 0;
 }
-static DEVICE_ATTR_RW(ctxid_idx);
 
-static ssize_t ctxid_pid_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static int etm_enable_sysfs(struct coresight_device *csdev)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       int ret;
 
        spin_lock(&drvdata->spinlock);
-       val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
 
-static ssize_t ctxid_pid_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       int ret;
-       unsigned long vpid, pid;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /*
+        * Configure the ETM only if the CPU is online.  If it isn't online,
+        * hw configuration will take place when 'CPU_STARTING' is received
+        * in @etm_cpu_callback.
+        */
+       if (cpu_online(drvdata->cpu)) {
+               ret = smp_call_function_single(drvdata->cpu,
+                                              etm_enable_hw, drvdata, 1);
+               if (ret)
+                       goto err;
+       }
 
-       ret = kstrtoul(buf, 16, &vpid);
-       if (ret)
-               return ret;
+       drvdata->sticky_enable = true;
+       spin_unlock(&drvdata->spinlock);
 
-       pid = coresight_vpid_to_pid(vpid);
+       dev_info(drvdata->dev, "ETM tracing enabled\n");
+       return 0;
 
-       spin_lock(&drvdata->spinlock);
-       drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
-       drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
+err:
        spin_unlock(&drvdata->spinlock);
-
-       return size;
+       return ret;
 }
-static DEVICE_ATTR_RW(ctxid_pid);
 
-static ssize_t ctxid_mask_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static int etm_enable(struct coresight_device *csdev,
+                     struct perf_event_attr *attr, u32 mode)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       int ret;
+       u32 val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       val = drvdata->ctxid_mask;
-       return sprintf(buf, "%#lx\n", val);
-}
+       val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
 
-static ssize_t ctxid_mask_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Someone is already using the tracer */
+       if (val)
+               return -EBUSY;
+
+       switch (mode) {
+       case CS_MODE_SYSFS:
+               ret = etm_enable_sysfs(csdev);
+               break;
+       case CS_MODE_PERF:
+               ret = etm_enable_perf(csdev, attr);
+               break;
+       default:
+               ret = -EINVAL;
+       }
 
-       ret = kstrtoul(buf, 16, &val);
+       /* The tracer didn't start */
        if (ret)
-               return ret;
+               local_set(&drvdata->mode, CS_MODE_DISABLED);
 
-       drvdata->ctxid_mask = val;
-       return size;
+       return ret;
 }
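
The local_cmpxchg() above carries the entire sysfs/perf mutual exclusion: the transition only succeeds from CS_MODE_DISABLED, so a second user sees -EBUSY. The claim/release pattern, distilled (example_* names hypothetical):

        static bool example_claim_tracer(struct etm_drvdata *drvdata, u32 mode)
        {
                /* A non-zero old value means someone already owns the tracer */
                return local_cmpxchg(&drvdata->mode,
                                     CS_MODE_DISABLED, mode) == CS_MODE_DISABLED;
        }

        static void example_release_tracer(struct etm_drvdata *drvdata)
        {
                local_set(&drvdata->mode, CS_MODE_DISABLED);
        }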
-static DEVICE_ATTR_RW(ctxid_mask);
 
-static ssize_t sync_freq_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static void etm_disable_hw(void *info)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->sync_freq;
-       return sprintf(buf, "%#lx\n", val);
-}
+       int i;
+       struct etm_drvdata *drvdata = info;
+       struct etm_config *config = &drvdata->config;
 
-static ssize_t sync_freq_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       CS_UNLOCK(drvdata->base);
+       etm_set_prog(drvdata);
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       /* Read back sequencer and counters for post trace analysis */
+       config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
 
-       drvdata->sync_freq = val & ETM_SYNC_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(sync_freq);
+       for (i = 0; i < drvdata->nr_cntr; i++)
+               config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
 
-static ssize_t timestamp_event_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       etm_set_pwrdwn(drvdata);
+       CS_LOCK(drvdata->base);
 
-       val = drvdata->timestamp_event;
-       return sprintf(buf, "%#lx\n", val);
+       dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
 }
 
-static ssize_t timestamp_event_store(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t size)
+static void etm_disable_perf(struct coresight_device *csdev)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+               return;
 
-       drvdata->timestamp_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(timestamp_event);
+       CS_UNLOCK(drvdata->base);
 
-static ssize_t cpu_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
-{
-       int val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Setting the prog bit disables tracing immediately */
+       etm_set_prog(drvdata);
 
-       val = drvdata->cpu;
-       return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+       /*
+        * There is no way to know when the tracer will be used again so
+        * power down the tracer.
+        */
+       etm_set_pwrdwn(drvdata);
 
+       CS_LOCK(drvdata->base);
 }
-static DEVICE_ATTR_RO(cpu);
 
-static ssize_t traceid_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+static void etm_disable_sysfs(struct coresight_device *csdev)
 {
-       unsigned long val, flags;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       if (!drvdata->enable) {
-               val = drvdata->traceid;
-               goto out;
-       }
+       /*
+        * Taking the hotplug lock here protects against clocks being
+        * disabled while tracing is still on (a crash scenario): a user
+        * disable can occur after the cpu online mask says the CPU is
+        * offline but before the DYING hotplug callback is serviced by
+        * the ETM driver.
+        */
+       get_online_cpus();
+       spin_lock(&drvdata->spinlock);
 
-       pm_runtime_get_sync(drvdata->dev);
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       CS_UNLOCK(drvdata->base);
+       /*
+        * Executing etm_disable_hw on the cpu whose ETM is being disabled
+        * ensures that register writes occur when cpu is powered.
+        */
+       smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
 
-       val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+       spin_unlock(&drvdata->spinlock);
+       put_online_cpus();
 
-       CS_LOCK(drvdata->base);
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(drvdata->dev);
-out:
-       return sprintf(buf, "%#lx\n", val);
+       dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
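
A condensed sketch of the hotplug-safe pattern used by the sysfs enable/disable paths above, under the driver's own assumption that the ETM registers may only be touched from their owner CPU while that CPU is pinned online (example_* name hypothetical; the disable path additionally leans on the DYING callback for CPUs going down):

        static void example_on_owner_cpu(struct etm_drvdata *drvdata,
                                         void (*fn)(void *))
        {
                get_online_cpus();              /* the CPU cannot vanish now */
                if (cpu_online(drvdata->cpu))
                        smp_call_function_single(drvdata->cpu, fn, drvdata, 1);
                put_online_cpus();
        }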
 
-static ssize_t traceid_store(struct device *dev,
-                            struct device_attribute *attr,
-                            const char *buf, size_t size)
+static void etm_disable(struct coresight_device *csdev)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       drvdata->traceid = val & ETM_TRACEID_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(traceid);
-
-static struct attribute *coresight_etm_attrs[] = {
-       &dev_attr_nr_addr_cmp.attr,
-       &dev_attr_nr_cntr.attr,
-       &dev_attr_nr_ctxid_cmp.attr,
-       &dev_attr_etmsr.attr,
-       &dev_attr_reset.attr,
-       &dev_attr_mode.attr,
-       &dev_attr_trigger_event.attr,
-       &dev_attr_enable_event.attr,
-       &dev_attr_fifofull_level.attr,
-       &dev_attr_addr_idx.attr,
-       &dev_attr_addr_single.attr,
-       &dev_attr_addr_range.attr,
-       &dev_attr_addr_start.attr,
-       &dev_attr_addr_stop.attr,
-       &dev_attr_addr_acctype.attr,
-       &dev_attr_cntr_idx.attr,
-       &dev_attr_cntr_rld_val.attr,
-       &dev_attr_cntr_event.attr,
-       &dev_attr_cntr_rld_event.attr,
-       &dev_attr_cntr_val.attr,
-       &dev_attr_seq_12_event.attr,
-       &dev_attr_seq_21_event.attr,
-       &dev_attr_seq_23_event.attr,
-       &dev_attr_seq_31_event.attr,
-       &dev_attr_seq_32_event.attr,
-       &dev_attr_seq_13_event.attr,
-       &dev_attr_seq_curr_state.attr,
-       &dev_attr_ctxid_idx.attr,
-       &dev_attr_ctxid_pid.attr,
-       &dev_attr_ctxid_mask.attr,
-       &dev_attr_sync_freq.attr,
-       &dev_attr_timestamp_event.attr,
-       &dev_attr_traceid.attr,
-       &dev_attr_cpu.attr,
-       NULL,
-};
+       u32 mode;
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-#define coresight_simple_func(name, offset)                             \
-static ssize_t name##_show(struct device *_dev,                         \
-                          struct device_attribute *attr, char *buf)    \
-{                                                                       \
-       struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
-                        readl_relaxed(drvdata->base + offset));        \
-}                                                                       \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(etmccr, ETMCCR);
-coresight_simple_func(etmccer, ETMCCER);
-coresight_simple_func(etmscr, ETMSCR);
-coresight_simple_func(etmidr, ETMIDR);
-coresight_simple_func(etmcr, ETMCR);
-coresight_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_simple_func(etmteevr, ETMTEEVR);
-coresight_simple_func(etmtssvr, ETMTSSCR);
-coresight_simple_func(etmtecr1, ETMTECR1);
-coresight_simple_func(etmtecr2, ETMTECR2);
-
-static struct attribute *coresight_etm_mgmt_attrs[] = {
-       &dev_attr_etmccr.attr,
-       &dev_attr_etmccer.attr,
-       &dev_attr_etmscr.attr,
-       &dev_attr_etmidr.attr,
-       &dev_attr_etmcr.attr,
-       &dev_attr_etmtraceidr.attr,
-       &dev_attr_etmteevr.attr,
-       &dev_attr_etmtssvr.attr,
-       &dev_attr_etmtecr1.attr,
-       &dev_attr_etmtecr2.attr,
-       NULL,
-};
+       /*
+        * For as long as the tracer isn't disabled another entity can't
+        * change its status.  As such we can read the status here without
+        * fearing it will change under us.
+        */
+       mode = local_read(&drvdata->mode);
 
-static const struct attribute_group coresight_etm_group = {
-       .attrs = coresight_etm_attrs,
-};
+       switch (mode) {
+       case CS_MODE_DISABLED:
+               break;
+       case CS_MODE_SYSFS:
+               etm_disable_sysfs(csdev);
+               break;
+       case CS_MODE_PERF:
+               etm_disable_perf(csdev);
+               break;
+       default:
+               WARN_ON_ONCE(mode);
+               return;
+       }
 
+       if (mode)
+               local_set(&drvdata->mode, CS_MODE_DISABLED);
+}
 
-static const struct attribute_group coresight_etm_mgmt_group = {
-       .attrs = coresight_etm_mgmt_attrs,
-       .name = "mgmt",
+static const struct coresight_ops_source etm_source_ops = {
+       .cpu_id         = etm_cpu_id,
+       .trace_id       = etm_trace_id,
+       .enable         = etm_enable,
+       .disable        = etm_disable,
 };
 
-static const struct attribute_group *coresight_etm_groups[] = {
-       &coresight_etm_group,
-       &coresight_etm_mgmt_group,
-       NULL,
+static const struct coresight_ops etm_cs_ops = {
+       .source_ops     = &etm_source_ops,
 };
 
 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
@@ -1658,7 +657,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
                        etmdrvdata[cpu]->os_unlock = true;
                }
 
-               if (etmdrvdata[cpu]->enable)
+               if (local_read(&etmdrvdata[cpu]->mode))
                        etm_enable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;
@@ -1671,7 +670,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
 
        case CPU_DYING:
                spin_lock(&etmdrvdata[cpu]->spinlock);
-               if (etmdrvdata[cpu]->enable)
+               if (local_read(&etmdrvdata[cpu]->mode))
                        etm_disable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;
@@ -1707,6 +706,9 @@ static void etm_init_arch_data(void *info)
        u32 etmccr;
        struct etm_drvdata *drvdata = info;
 
+       /* Make sure all registers are accessible */
+       etm_os_unlock(drvdata);
+
        CS_UNLOCK(drvdata->base);
 
        /* First dummy read */
@@ -1743,40 +745,9 @@ static void etm_init_arch_data(void *info)
        CS_LOCK(drvdata->base);
 }
 
-static void etm_init_default_data(struct etm_drvdata *drvdata)
+static void etm_init_trace_id(struct etm_drvdata *drvdata)
 {
-       /*
-        * A trace ID of value 0 is invalid, so let's start at some
-        * random value that fits in 7 bits and will be just as good.
-        */
-       static int etm3x_traceid = 0x10;
-
-       u32 flags = (1 << 0 | /* instruction execute*/
-                    3 << 3 | /* ARM instruction */
-                    0 << 5 | /* No data value comparison */
-                    0 << 7 | /* No exact mach */
-                    0 << 8 | /* Ignore context ID */
-                    0 << 10); /* Security ignored */
-
-       /*
-        * Initial configuration only - guarantees sources handled by
-        * this driver have a unique ID at startup time but not between
-        * all other types of sources.  For that we lean on the core
-        * framework.
-        */
-       drvdata->traceid = etm3x_traceid++;
-       drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
-       drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
-       if (drvdata->nr_addr_cmp >= 2) {
-               drvdata->addr_val[0] = (u32) _stext;
-               drvdata->addr_val[1] = (u32) _etext;
-               drvdata->addr_acctype[0] = flags;
-               drvdata->addr_acctype[1] = flags;
-               drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
-               drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
-       }
-
-       etm_set_default(drvdata);
+       drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
 }
 
 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
@@ -1831,9 +802,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
        get_online_cpus();
        etmdrvdata[drvdata->cpu] = drvdata;
 
-       if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
-               drvdata->os_unlock = true;
-
        if (smp_call_function_single(drvdata->cpu,
                                     etm_init_arch_data,  drvdata, 1))
                dev_err(dev, "ETM arch init failed\n");
@@ -1847,7 +815,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
                ret = -EINVAL;
                goto err_arch_supported;
        }
-       etm_init_default_data(drvdata);
+
+       etm_init_trace_id(drvdata);
+       etm_set_default(&drvdata->config);
 
        desc->type = CORESIGHT_DEV_TYPE_SOURCE;
        desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1861,6 +831,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
                goto err_arch_supported;
        }
 
+       ret = etm_perf_symlink(drvdata->csdev, true);
+       if (ret) {
+               coresight_unregister(drvdata->csdev);
+               goto err_arch_supported;
+       }
+
        pm_runtime_put(&adev->dev);
        dev_info(dev, "%s initialized\n", (char *)id->data);
 
@@ -1877,17 +853,6 @@ err_arch_supported:
        return ret;
 }
 
-static int etm_remove(struct amba_device *adev)
-{
-       struct etm_drvdata *drvdata = amba_get_drvdata(adev);
-
-       coresight_unregister(drvdata->csdev);
-       if (--etm_count == 0)
-               unregister_hotcpu_notifier(&etm_cpu_notifier);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int etm_runtime_suspend(struct device *dev)
 {
@@ -1948,13 +913,9 @@ static struct amba_driver etm_driver = {
                .name   = "coresight-etm3x",
                .owner  = THIS_MODULE,
                .pm     = &etm_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
        .probe          = etm_probe,
-       .remove         = etm_remove,
        .id_table       = etm_ids,
 };
-
-module_amba_driver(etm_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
+builtin_amba_driver(etm_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
new file mode 100644
index 0000000..7c84308
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -0,0 +1,2126 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm4x.h"
+
+static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
+{
+       u8 idx;
+       struct etmv4_config *config = &drvdata->config;
+
+       idx = config->addr_idx;
+
+       /*
+        * TRCACATRn.TYPE bit[1:0]: type of comparison
+        * the trace unit performs
+        */
+       if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
+               if (idx % 2 != 0)
+                       return -EINVAL;
+
+               /*
+                * We are performing instruction address comparison. Set the
+                * relevant bit of ViewInst Include/Exclude Control register
+                * for corresponding address comparator pair.
+                */
+               if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
+                   config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
+                       return -EINVAL;
+
+               if (exclude) {
+                       /*
+                        * Set exclude bit and unset the include bit
+                        * corresponding to comparator pair
+                        */
+                       config->viiectlr |= BIT(idx / 2 + 16);
+                       config->viiectlr &= ~BIT(idx / 2);
+               } else {
+                       /*
+                        * Set include bit and unset exclude bit
+                        * corresponding to comparator pair
+                        */
+                       config->viiectlr |= BIT(idx / 2);
+                       config->viiectlr &= ~BIT(idx / 2 + 16);
+               }
+       }
+       return 0;
+}
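+
A worked example of the pair arithmetic above: comparators operate in even/odd pairs, so idx = 2 belongs to pair 1, whose include bit is BIT(1) and whose exclude bit is BIT(17) in the ViewInst include/exclude control register. As a pure function (example_* name hypothetical):

        static u32 example_viiectlr_bits(u32 viiectlr, u8 idx, bool exclude)
        {
                u32 incl = BIT(idx / 2);        /* include half of the pair */
                u32 excl = BIT(idx / 2 + 16);   /* exclude half of the pair */

                if (exclude)
                        return (viiectlr | excl) & ~incl;
                return (viiectlr | incl) & ~excl;
        }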
+
+static ssize_t nr_pe_cmp_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_pe_cmp;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_pe_cmp);
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_addr_cmp;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_cntr;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ext_inp_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_ext_inp;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ext_inp);
+
+static ssize_t numcidc_show(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->numcidc;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(numcidc);
+
+static ssize_t numvmidc_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->numvmidc;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(numvmidc);
+
+static ssize_t nrseqstate_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nrseqstate;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nrseqstate);
+
+static ssize_t nr_resource_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_resource;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_resource);
+
+static ssize_t nr_ss_cmp_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_ss_cmp;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ss_cmp);
+
+static ssize_t reset_store(struct device *dev,
+                          struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       int i;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       if (val)
+               config->mode = 0x0;
+
+       /* Disable data tracing: do not trace load and store data transfers */
+       config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
+       config->cfg &= ~(BIT(1) | BIT(2));
+
+       /* Disable data value and data address tracing */
+       config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
+                          ETM_MODE_DATA_TRACE_VAL);
+       config->cfg &= ~(BIT(16) | BIT(17));
+
+       /* Disable all events tracing */
+       config->eventctrl0 = 0x0;
+       config->eventctrl1 = 0x0;
+
+       /* Disable timestamp event */
+       config->ts_ctrl = 0x0;
+
+       /* Disable stalling */
+       config->stall_ctrl = 0x0;
+
+       /* Reset trace synchronization period to 2^8 = 256 bytes */
+       if (!drvdata->syncpr)
+               config->syncfreq = 0x8;
+
+       /*
+        * Enable ViewInst to trace everything with start-stop logic in
+        * started state. ARM recommends start-stop logic is set before
+        * each trace run.
+        */
+       config->vinst_ctrl |= BIT(0);
+       if (drvdata->nr_addr_cmp) {
+               config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
+               /* SSSTATUS, bit[9] */
+               config->vinst_ctrl |= BIT(9);
+       }
+
+       /* No address range filtering for ViewInst */
+       config->viiectlr = 0x0;
+
+       /* No start-stop filtering for ViewInst */
+       config->vissctlr = 0x0;
+
+       /* Disable seq events */
+       for (i = 0; i < drvdata->nrseqstate - 1; i++)
+               config->seq_ctrl[i] = 0x0;
+       config->seq_rst = 0x0;
+       config->seq_state = 0x0;
+
+       /* Disable external input events */
+       config->ext_inp = 0x0;
+
+       config->cntr_idx = 0x0;
+       for (i = 0; i < drvdata->nr_cntr; i++) {
+               config->cntrldvr[i] = 0x0;
+               config->cntr_ctrl[i] = 0x0;
+               config->cntr_val[i] = 0x0;
+       }
+
+       config->res_idx = 0x0;
+       for (i = 0; i < drvdata->nr_resource; i++)
+               config->res_ctrl[i] = 0x0;
+
+       for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+               config->ss_ctrl[i] = 0x0;
+               config->ss_pe_cmp[i] = 0x0;
+       }
+
+       config->addr_idx = 0x0;
+       for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+               config->addr_val[i] = 0x0;
+               config->addr_acc[i] = 0x0;
+               config->addr_type[i] = ETM_ADDR_TYPE_NONE;
+       }
+
+       config->ctxid_idx = 0x0;
+       for (i = 0; i < drvdata->numcidc; i++) {
+               config->ctxid_pid[i] = 0x0;
+               config->ctxid_vpid[i] = 0x0;
+       }
+
+       config->ctxid_mask0 = 0x0;
+       config->ctxid_mask1 = 0x0;
+
+       config->vmid_idx = 0x0;
+       for (i = 0; i < drvdata->numvmidc; i++)
+               config->vmid_val[i] = 0x0;
+       config->vmid_mask0 = 0x0;
+       config->vmid_mask1 = 0x0;
+
+       drvdata->trcid = drvdata->cpu + 1;
+
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_WO(reset);
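
Usage sketch from user space; the device name under /sys/bus/coresight/devices/ is platform-specific, so "etm0" below is purely illustrative:

        #include <fcntl.h>
        #include <unistd.h>

        static int example_reset_etm(void)
        {
                int fd = open("/sys/bus/coresight/devices/etm0/reset", O_WRONLY);

                if (fd < 0)
                        return -1;
                /* A non-zero hex value restores the default configuration */
                write(fd, "1", 1);
                close(fd);
                return 0;
        }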
+
+static ssize_t mode_show(struct device *dev,
+                        struct device_attribute *attr,
+                        char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->mode;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buf, size_t size)
+{
+       unsigned long val, mode;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       config->mode = val & ETMv4_MODE_ALL;
+
+       if (config->mode & ETM_MODE_EXCLUDE)
+               etm4_set_mode_exclude(drvdata, true);
+       else
+               etm4_set_mode_exclude(drvdata, false);
+
+       if (drvdata->instrp0) {
+               /* start by clearing instruction P0 field */
+               config->cfg  &= ~(BIT(1) | BIT(2));
+               if (config->mode & ETM_MODE_LOAD)
+                       /* 0b01 Trace load instructions as P0 instructions */
+                       config->cfg  |= BIT(1);
+               if (config->mode & ETM_MODE_STORE)
+                       /* 0b10 Trace store instructions as P0 instructions */
+                       config->cfg  |= BIT(2);
+               if (config->mode & ETM_MODE_LOAD_STORE)
+                       /*
+                        * 0b11 Trace load and store instructions
+                        * as P0 instructions
+                        */
+                       config->cfg  |= BIT(1) | BIT(2);
+       }
+
+       /* bit[3], Branch broadcast mode */
+       if ((config->mode & ETM_MODE_BB) && drvdata->trcbb)
+               config->cfg |= BIT(3);
+       else
+               config->cfg &= ~BIT(3);
+
+       /* bit[4], Cycle counting instruction trace bit */
+       if ((config->mode & ETMv4_MODE_CYCACC) && drvdata->trccci)
+               config->cfg |= BIT(4);
+       else
+               config->cfg &= ~BIT(4);
+
+       /* bit[6], Context ID tracing bit */
+       if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
+               config->cfg |= BIT(6);
+       else
+               config->cfg &= ~BIT(6);
+
+       if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
+               config->cfg |= BIT(7);
+       else
+               config->cfg &= ~BIT(7);
+
+       /* bits[10:8], Conditional instruction tracing bit */
+       mode = ETM_MODE_COND(config->mode);
+       if (drvdata->trccond) {
+               config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
+               config->cfg |= mode << 8;
+       }
+
+       /* bit[11], Global timestamp tracing bit */
+       if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
+               config->cfg |= BIT(11);
+       else
+               config->cfg &= ~BIT(11);
+
+       /* bit[12], Return stack enable bit */
+       if ((config->mode & ETM_MODE_RETURNSTACK) && drvdata->retstack)
+               config->cfg |= BIT(12);
+       else
+               config->cfg &= ~BIT(12);
+
+       /* bits[14:13], Q element enable field */
+       mode = ETM_MODE_QELEM(config->mode);
+       /* start by clearing QE bits */
+       config->cfg &= ~(BIT(13) | BIT(14));
+       /* if supported, Q elements with instruction counts are enabled */
+       if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
+               config->cfg |= BIT(13);
+       /*
+        * if supported, Q elements with and without instruction
+        * counts are enabled
+        */
+       if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
+               config->cfg |= BIT(14);
+
+       /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
+       if ((config->mode & ETM_MODE_ATB_TRIGGER) && drvdata->atbtrig)
+               config->eventctrl1 |= BIT(11);
+       else
+               config->eventctrl1 &= ~BIT(11);
+
+       /* bit[12], Low-power state behavior override bit */
+       if ((config->mode & ETM_MODE_LPOVERRIDE) && drvdata->lpoverride)
+               config->eventctrl1 |= BIT(12);
+       else
+               config->eventctrl1 &= ~BIT(12);
+
+       /* bit[8], Instruction stall bit */
+       if (config->mode & ETM_MODE_ISTALL_EN)
+               config->stall_ctrl |= BIT(8);
+       else
+               config->stall_ctrl &= ~BIT(8);
+
+       /* bit[10], Prioritize instruction trace bit */
+       if (config->mode & ETM_MODE_INSTPRIO)
+               config->stall_ctrl |= BIT(10);
+       else
+               config->stall_ctrl &= ~BIT(10);
+
+       /* bit[13], Trace overflow prevention bit */
+       if ((config->mode & ETM_MODE_NOOVERFLOW) && drvdata->nooverflow)
+               config->stall_ctrl |= BIT(13);
+       else
+               config->stall_ctrl &= ~BIT(13);
+
+       /* bit[9] Start/stop logic control bit */
+       if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
+               config->vinst_ctrl |= BIT(9);
+       else
+               config->vinst_ctrl &= ~BIT(9);
+
+       /* bit[10], Whether a trace unit must trace a Reset exception */
+       if (config->mode & ETM_MODE_TRACE_RESET)
+               config->vinst_ctrl |= BIT(10);
+       else
+               config->vinst_ctrl &= ~BIT(10);
+
+       /* bit[11], Whether a trace unit must trace a system error exception */
+       if ((config->mode & ETM_MODE_TRACE_ERR) && drvdata->trc_error)
+               config->vinst_ctrl |= BIT(11);
+       else
+               config->vinst_ctrl &= ~BIT(11);
+
+       if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+               etm4_config_trace_mode(config);
+
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t pe_show(struct device *dev,
+                      struct device_attribute *attr,
+                      char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->pe_sel;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t pe_store(struct device *dev,
+                       struct device_attribute *attr,
+                       const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       if (val > drvdata->nr_pe) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+
+       config->pe_sel = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(pe);
+
+static ssize_t event_show(struct device *dev,
+                         struct device_attribute *attr,
+                         char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->eventctrl0;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_store(struct device *dev,
+                          struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       switch (drvdata->nr_event) {
+       case 0x0:
+               /* EVENT0, bits[7:0] */
+               config->eventctrl0 = val & 0xFF;
+               break;
+       case 0x1:
+               /* EVENT1, bits[15:8] */
+               config->eventctrl0 = val & 0xFFFF;
+               break;
+       case 0x2:
+               /* EVENT2, bits[23:16] */
+               config->eventctrl0 = val & 0xFFFFFF;
+               break;
+       case 0x3:
+               /* EVENT3, bits[31:24] */
+               config->eventctrl0 = val;
+               break;
+       default:
+               break;
+       }
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(event);
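
The switch above widens the accepted mask by one byte per implemented event selector; a table-driven equivalent under that reading (example_* name hypothetical):

        static u32 example_event_mask(u32 nr_event)
        {
                /* nr_event = n means selectors EVENT0..EVENTn are usable */
                static const u32 masks[] = {
                        0xFF,           /* EVENT0 only */
                        0xFFFF,         /* + EVENT1 */
                        0xFFFFFF,       /* + EVENT2 */
                        0xFFFFFFFF,     /* + EVENT3 */
                };

                return nr_event < ARRAY_SIZE(masks) ? masks[nr_event] : 0;
        }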
+
+static ssize_t event_instren_show(struct device *dev,
+                                 struct device_attribute *attr,
+                                 char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = BMVAL(config->eventctrl1, 0, 3);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_instren_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       /* start by clearing all instruction event enable bits */
+       config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
+       switch (drvdata->nr_event) {
+       case 0x0:
+               /* generate Event element for event 1 */
+               config->eventctrl1 |= val & BIT(1);
+               break;
+       case 0x1:
+               /* generate Event element for event 1 and 2 */
+               config->eventctrl1 |= val & (BIT(0) | BIT(1));
+               break;
+       case 0x2:
+               /* generate Event element for event 1, 2 and 3 */
+               config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
+               break;
+       case 0x3:
+               /* generate Event element for all 4 events */
+               config->eventctrl1 |= val & 0xF;
+               break;
+       default:
+               break;
+       }
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(event_instren);
+
+static ssize_t event_ts_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->ts_ctrl;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_ts_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (!drvdata->ts_size)
+               return -EINVAL;
+
+       config->ts_ctrl = val & ETMv4_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(event_ts);
+
+static ssize_t syncfreq_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->syncfreq;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t syncfreq_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (drvdata->syncpr)
+               return -EINVAL;
+
+       config->syncfreq = val & ETMv4_SYNC_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(syncfreq);
+
+static ssize_t cyc_threshold_show(struct device *dev,
+                                 struct device_attribute *attr,
+                                 char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->ccctlr;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cyc_threshold_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val < drvdata->ccitmin)
+               return -EINVAL;
+
+       config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(cyc_threshold);
+
+static ssize_t bb_ctrl_show(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->bb_ctrl;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t bb_ctrl_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (!drvdata->trcbb)
+               return -EINVAL;
+       if (!drvdata->nr_addr_cmp)
+               return -EINVAL;
+       /*
+        * Bits[7:0] select which address range comparator is used for
+        * branch broadcast control.
+        */
+       if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+               return -EINVAL;
+
+       config->bb_ctrl = val;
+       return size;
+}
+static DEVICE_ATTR_RW(bb_ctrl);
+
+static ssize_t event_vinst_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->vinst_ctrl & ETMv4_EVENT_MASK;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_vinst_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       val &= ETMv4_EVENT_MASK;
+       config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
+       config->vinst_ctrl |= val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(event_vinst);
+
+static ssize_t s_exlevel_vinst_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = BMVAL(config->vinst_ctrl, 16, 19);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t s_exlevel_vinst_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
+       config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
+       /* enable instruction tracing for corresponding exception level */
+       val &= drvdata->s_ex_level;
+       config->vinst_ctrl |= (val << 16);
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(s_exlevel_vinst);
+
+static ssize_t ns_exlevel_vinst_show(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       /* EXLEVEL_NS, bits[23:20] */
+       val = BMVAL(config->vinst_ctrl, 20, 23);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ns_exlevel_vinst_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       /* clear EXLEVEL_NS bits (bit[23] is never implemented) */
+       config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
+       /* enable instruction tracing for corresponding exception level */
+       val &= drvdata->ns_ex_level;
+       config->vinst_ctrl |= (val << 20);
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(ns_exlevel_vinst);
+
+static ssize_t addr_idx_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->addr_idx;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val >= drvdata->nr_addr_cmp * 2)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->addr_idx = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_instdatatype_show(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       ssize_t len;
+       u8 val, idx;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       val = BMVAL(config->addr_acc[idx], 0, 1);
+       len = scnprintf(buf, PAGE_SIZE, "%s\n",
+                       val == ETM_INSTR_ADDR ? "instr" :
+                       (val == ETM_DATA_LOAD_ADDR ? "data_load" :
+                       (val == ETM_DATA_STORE_ADDR ? "data_store" :
+                       "data_load_store")));
+       spin_unlock(&drvdata->spinlock);
+       return len;
+}
+
+static ssize_t addr_instdatatype_store(struct device *dev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t size)
+{
+       u8 idx;
+       char str[20] = "";
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (strlen(buf) >= 20)
+               return -EINVAL;
+       if (sscanf(buf, "%s", str) != 1)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!strcmp(str, "instr"))
+               /* TYPE, bits[1:0] */
+               config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
+
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(addr_instdatatype);
+
+static ssize_t addr_single_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+       val = (unsigned long)config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = (u64)val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       u8 idx;
+       unsigned long val1, val2;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (idx % 2 != 0) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+       if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+             (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val1 = (unsigned long)config->addr_val[idx];
+       val2 = (unsigned long)config->addr_val[idx + 1];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val1, val2;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+               return -EINVAL;
+       /* lower address comparator cannot have a higher address value */
+       if (val1 > val2)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (idx % 2 != 0) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+             (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = (u64)val1;
+       config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+       config->addr_val[idx + 1] = (u64)val2;
+       config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+       /*
+        * Program include or exclude control bits for vinst or vdata
+        * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
+        */
+       if (config->mode & ETM_MODE_EXCLUDE)
+               etm4_set_mode_exclude(drvdata, true);
+       else
+               etm4_set_mode_exclude(drvdata, false);
+
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(addr_range);
+
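addr_range_store() works on an even/odd comparator pair, and the etm4_set_mode_exclude() helper it calls (visible in the removed hunk further down) maps pair n onto TRCVIIECTLR with the include bit at position n and the exclude bit at position n + 16. A small standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int idx = 2;		/* even comparator of pair {2, 3} */
	unsigned int pair = idx / 2;	/* pair number 1 */

	printf("include bit %u, exclude bit %u\n", pair, pair + 16);
	return 0;			/* prints: include bit 1, exclude bit 17 */
}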
+static ssize_t addr_start_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val = (unsigned long)config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!drvdata->nr_addr_cmp) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = (u64)val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_START;
+       config->vissctlr |= BIT(idx);
+       /* SSSTATUS, bit[9] - turn on start/stop logic */
+       config->vinst_ctrl |= BIT(9);
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val = (unsigned long)config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!drvdata->nr_addr_cmp) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+              config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = (u64)val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+       config->vissctlr |= BIT(idx + 16);
+       /* SSSTATUS, bit[9] - turn on start/stop logic */
+       config->vinst_ctrl |= BIT(9);
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_ctxtype_show(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
+{
+       ssize_t len;
+       u8 idx, val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       /* CONTEXTTYPE, bits[3:2] */
+       val = BMVAL(config->addr_acc[idx], 2, 3);
+       len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
+                       (val == ETM_CTX_CTXID ? "ctxid" :
+                       (val == ETM_CTX_VMID ? "vmid" : "all")));
+       spin_unlock(&drvdata->spinlock);
+       return len;
+}
+
+static ssize_t addr_ctxtype_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       u8 idx;
+       char str[10] = "";
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (strlen(buf) >= 10)
+               return -EINVAL;
+       if (sscanf(buf, "%s", str) != 1)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!strcmp(str, "none"))
+               /* start by clearing context type bits */
+               config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
+       else if (!strcmp(str, "ctxid")) {
+               /* 0b01: the trace unit performs a Context ID comparison */
+               if (drvdata->numcidc) {
+                       config->addr_acc[idx] |= BIT(2);
+                       config->addr_acc[idx] &= ~BIT(3);
+               }
+       } else if (!strcmp(str, "vmid")) {
+               /* 0b10: the trace unit performs a VMID comparison */
+               if (drvdata->numvmidc) {
+                       config->addr_acc[idx] &= ~BIT(2);
+                       config->addr_acc[idx] |= BIT(3);
+               }
+       } else if (!strcmp(str, "all")) {
+               /*
+                * 0b11: the trace unit performs both a Context ID
+                * comparison and a VMID comparison
+               if (drvdata->numcidc)
+                       config->addr_acc[idx] |= BIT(2);
+               if (drvdata->numvmidc)
+                       config->addr_acc[idx] |= BIT(3);
+       }
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(addr_ctxtype);
+
+static ssize_t addr_context_show(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       /* context ID comparator bits[6:4] */
+       val = BMVAL(config->addr_acc[idx], 4, 6);
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_context_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
+               return -EINVAL;
+       if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
+                    drvdata->numcidc : drvdata->numvmidc))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       /* clear context ID comparator bits[6:4] */
+       config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
+       config->addr_acc[idx] |= (val << 4);
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(addr_context);
+
+static ssize_t seq_idx_show(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->seq_idx;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_idx_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val >= drvdata->nrseqstate - 1)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->seq_idx = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(seq_idx);
+
+static ssize_t seq_state_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->seq_state;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_state_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val >= drvdata->nrseqstate)
+               return -EINVAL;
+
+       config->seq_state = val;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_state);
+
+static ssize_t seq_event_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->seq_idx;
+       val = config->seq_ctrl[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_event_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->seq_idx;
+       /* RST, bits[7:0] */
+       config->seq_ctrl[idx] = val & 0xFF;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(seq_event);
+
+static ssize_t seq_reset_event_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->seq_rst;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_reset_event_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (!(drvdata->nrseqstate))
+               return -EINVAL;
+
+       config->seq_rst = val & ETMv4_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_reset_event);
+
+static ssize_t cntr_idx_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->cntr_idx;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val >= drvdata->nr_cntr)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->cntr_idx = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntrldvr_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->cntr_idx;
+       val = config->cntrldvr[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntrldvr_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val > ETM_CNTR_MAX_VAL)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->cntr_idx;
+       config->cntrldvr[idx] = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(cntrldvr);
+
+static ssize_t cntr_val_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->cntr_idx;
+       val = config->cntr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val > ETM_CNTR_MAX_VAL)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->cntr_idx;
+       config->cntr_val[idx] = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t cntr_ctrl_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->cntr_idx;
+       val = config->cntr_ctrl[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_ctrl_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->cntr_idx;
+       config->cntr_ctrl[idx] = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_ctrl);
+
+static ssize_t res_idx_show(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->res_idx;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t res_idx_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       /*
+        * Resource selector pair 0 is always implemented and reserved;
+        * nr_resource counts pairs, so valid selector indices run from
+        * 2 to (nr_resource * 2) - 1.
+        */
+       if ((val < 2) || (val >= drvdata->nr_resource * 2))
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->res_idx = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(res_idx);
+
+static ssize_t res_ctrl_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->res_idx;
+       val = config->res_ctrl[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t res_ctrl_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->res_idx;
+       /* For an odd idx the pair inversion bit (PAIRINV) is RES0 */
+       if (idx % 2 != 0)
+               /* PAIRINV, bit[21] */
+               val &= ~BIT(21);
+       config->res_ctrl[idx] = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(res_ctrl);
+
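A worked example of the PAIRINV scrubbing in res_ctrl_store(), with a hypothetical value: for an odd selector the inversion bit is reserved-zero, so it is dropped before the value is cached.

#include <stdio.h>

int main(void)
{
	unsigned long val = (1UL << 21) | 0x4;	/* PAIRINV | a select field */
	unsigned int idx = 3;			/* odd resource selector */

	if (idx % 2 != 0)
		val &= ~(1UL << 21);		/* PAIRINV, bit[21] */
	printf("%#lx\n", val);			/* prints 0x4 */
	return 0;
}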
+static ssize_t ctxid_idx_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->ctxid_idx;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val >= drvdata->numcidc)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->ctxid_idx = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_pid_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->ctxid_idx;
+       val = (unsigned long)config->ctxid_vpid[idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ctxid_pid_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long vpid, pid;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       /*
+        * Only implemented when ctxid tracing is enabled, i.e. at least
+        * one ctxid comparator is implemented and the ctxid size is
+        * non-zero.
+        */
+       if (!drvdata->ctxid_size || !drvdata->numcidc)
+               return -EINVAL;
+       if (kstrtoul(buf, 16, &vpid))
+               return -EINVAL;
+
+       pid = coresight_vpid_to_pid(vpid);
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->ctxid_idx;
+       config->ctxid_pid[idx] = (u64)pid;
+       config->ctxid_vpid[idx] = (u64)vpid;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_pid);
+
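coresight_vpid_to_pid() converts the PID the writer sees in its own namespace into the kernel's global PID before it is programmed into the comparator, so the filter still matches when configured from inside a PID namespace. Roughly what the helper in include/linux/coresight.h does in this era (reproduced from memory, treat as approximate):

static inline int coresight_vpid_to_pid(int vpid)
{
	struct task_struct *task = NULL;
	int pid = 0;

	rcu_read_lock();
	task = find_task_by_vpid(vpid);		/* vpid in caller's namespace */
	if (task)
		pid = task_pid_nr(task);	/* global PID */
	rcu_read_unlock();

	return pid;
}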
+static ssize_t ctxid_masks_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       unsigned long val1, val2;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val1 = config->ctxid_mask0;
+       val2 = config->ctxid_mask1;
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t ctxid_masks_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       u8 i, j, maskbyte;
+       unsigned long val1, val2, mask;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       /*
+        * Only implemented when ctxid tracing is enabled, i.e. at least
+        * one ctxid comparator is implemented and the ctxid size is
+        * non-zero.
+        */
+       if (!drvdata->ctxid_size || !drvdata->numcidc)
+               return -EINVAL;
+       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       /*
+        * Each byte [0..3] of the mask word controls the mask applied
+        * to the corresponding ctxid comparator [0..3].
+        */
+       switch (drvdata->numcidc) {
+       case 0x1:
+               /* COMP0, bits[7:0] */
+               config->ctxid_mask0 = val1 & 0xFF;
+               break;
+       case 0x2:
+               /* COMP1, bits[15:8] */
+               config->ctxid_mask0 = val1 & 0xFFFF;
+               break;
+       case 0x3:
+               /* COMP2, bits[23:16] */
+               config->ctxid_mask0 = val1 & 0xFFFFFF;
+               break;
+       case 0x4:
+               /* COMP3, bits[31:24] */
+               config->ctxid_mask0 = val1;
+               break;
+       case 0x5:
+               /* COMP4, bits[7:0] */
+               config->ctxid_mask0 = val1;
+               config->ctxid_mask1 = val2 & 0xFF;
+               break;
+       case 0x6:
+               /* COMP5, bits[15:8] */
+               config->ctxid_mask0 = val1;
+               config->ctxid_mask1 = val2 & 0xFFFF;
+               break;
+       case 0x7:
+               /* COMP6, bits[23:16] */
+               config->ctxid_mask0 = val1;
+               config->ctxid_mask1 = val2 & 0xFFFFFF;
+               break;
+       case 0x8:
+               /* COMP7, bits[31:24] */
+               config->ctxid_mask0 = val1;
+               config->ctxid_mask1 = val2;
+               break;
+       default:
+               break;
+       }
+       /*
+        * If software sets a mask bit to 1, it must program the relevant
+        * byte of the ctxid comparator value to 0x0, otherwise the
+        * behavior is unpredictable. For example, if bit[3] of ctxid_mask0
+        * is 1, we must clear bits[31:24] (byte 3) of the ctxid
+        * comparator0 value register.
+        */
+       mask = config->ctxid_mask0;
+       for (i = 0; i < drvdata->numcidc; i++) {
+               /* mask value of corresponding ctxid comparator */
+               maskbyte = mask & ETMv4_EVENT_MASK;
+               /*
+                * Each bit corresponds to a byte of the respective ctxid
+                * comparator value register.
+                */
+               for (j = 0; j < 8; j++) {
+                       if (maskbyte & 1)
+                               config->ctxid_pid[i] &= ~(0xFFULL << (j * 8));
+                       maskbyte >>= 1;
+               }
+               /* Select the next ctxid comparator mask value */
+               if (i == 3)
+                       /* ctxid comparators[4-7] */
+                       mask = config->ctxid_mask1;
+               else
+                       mask >>= 0x8;
+       }
+
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_masks);
+
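The byte/bit indirection above is easy to misread: byte i of a mask word covers ctxid comparator i, and bit j inside that byte blanks byte j of that comparator's value. A standalone demonstration with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ctxid_pid[4] = { 0x11223344, 0, 0, 0 };
	uint32_t mask = 0x03;	/* comparator 0: blank value bytes 0 and 1 */

	for (int i = 0; i < 4; i++) {
		uint8_t maskbyte = mask & 0xFF;

		for (int j = 0; j < 8; j++) {
			if (maskbyte & 1)
				ctxid_pid[i] &= ~(0xFFULL << (j * 8));
			maskbyte >>= 1;
		}
		mask >>= 8;	/* next comparator's mask byte */
	}
	/* prints 0x11220000 */
	printf("%#llx\n", (unsigned long long)ctxid_pid[0]);
	return 0;
}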
+static ssize_t vmid_idx_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       val = config->vmid_idx;
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t vmid_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+       if (val >= drvdata->numvmidc)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->vmid_idx = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(vmid_idx);
+
+static ssize_t vmid_val_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = (unsigned long)config->vmid_val[config->vmid_idx];
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t vmid_val_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       unsigned long val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       /*
+        * Only implemented when vmid tracing is enabled, i.e. at least
+        * one vmid comparator is implemented and the vmid size is at
+        * least 8 bits.
+        */
+       if (!drvdata->vmid_size || !drvdata->numvmidc)
+               return -EINVAL;
+       if (kstrtoul(buf, 16, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       config->vmid_val[config->vmid_idx] = (u64)val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(vmid_val);
+
+static ssize_t vmid_masks_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       unsigned long val1, val2;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val1 = config->vmid_mask0;
+       val2 = config->vmid_mask1;
+       spin_unlock(&drvdata->spinlock);
+       return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t vmid_masks_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       u8 i, j, maskbyte;
+       unsigned long val1, val2, mask;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
+
+       /*
+        * Only implemented when vmid tracing is enabled, i.e. at least
+        * one vmid comparator is implemented and the vmid size is at
+        * least 8 bits.
+        */
+       if (!drvdata->vmid_size || !drvdata->numvmidc)
+               return -EINVAL;
+       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+
+       /*
+        * Each byte [0..3] of the mask word controls the mask applied
+        * to the corresponding vmid comparator [0..3].
+        */
+       switch (drvdata->numvmidc) {
+       case 0x1:
+               /* COMP0, bits[7:0] */
+               config->vmid_mask0 = val1 & 0xFF;
+               break;
+       case 0x2:
+               /* COMP1, bits[15:8] */
+               config->vmid_mask0 = val1 & 0xFFFF;
+               break;
+       case 0x3:
+               /* COMP2, bits[23:16] */
+               config->vmid_mask0 = val1 & 0xFFFFFF;
+               break;
+       case 0x4:
+               /* COMP3, bits[31:24] */
+               config->vmid_mask0 = val1;
+               break;
+       case 0x5:
+               /* COMP4, bits[7:0] */
+               config->vmid_mask0 = val1;
+               config->vmid_mask1 = val2 & 0xFF;
+               break;
+       case 0x6:
+               /* COMP5, bits[15:8] */
+               config->vmid_mask0 = val1;
+               config->vmid_mask1 = val2 & 0xFFFF;
+               break;
+       case 0x7:
+               /* COMP6, bits[23:16] */
+               config->vmid_mask0 = val1;
+               config->vmid_mask1 = val2 & 0xFFFFFF;
+               break;
+       case 0x8:
+               /* COMP7, bits[31:24] */
+               config->vmid_mask0 = val1;
+               config->vmid_mask1 = val2;
+               break;
+       default:
+               break;
+       }
+
+       /*
+        * If software sets a mask bit to 1, it must program the relevant
+        * byte of the vmid comparator value to 0x0, otherwise the
+        * behavior is unpredictable. For example, if bit[3] of vmid_mask0
+        * is 1, we must clear bits[31:24] (byte 3) of the vmid
+        * comparator0 value register.
+        */
+       mask = config->vmid_mask0;
+       for (i = 0; i < drvdata->numvmidc; i++) {
+               /* mask value of corresponding vmid comparator */
+               maskbyte = mask & ETMv4_EVENT_MASK;
+               /*
+                * Each bit corresponds to a byte of the respective vmid
+                * comparator value register.
+                */
+               for (j = 0; j < 8; j++) {
+                       if (maskbyte & 1)
+                               config->vmid_val[i] &= ~(0xFFULL << (j * 8));
+                       maskbyte >>= 1;
+               }
+               /* Select the next vmid comparator mask value */
+               if (i == 3)
+                       /* vmid comparators[4-7] */
+                       mask = config->vmid_mask1;
+               else
+                       mask >>= 0x8;
+       }
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(vmid_masks);
+
+static ssize_t cpu_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       int val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->cpu;
+       return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+
+}
+static DEVICE_ATTR_RO(cpu);
+
+static struct attribute *coresight_etmv4_attrs[] = {
+       &dev_attr_nr_pe_cmp.attr,
+       &dev_attr_nr_addr_cmp.attr,
+       &dev_attr_nr_cntr.attr,
+       &dev_attr_nr_ext_inp.attr,
+       &dev_attr_numcidc.attr,
+       &dev_attr_numvmidc.attr,
+       &dev_attr_nrseqstate.attr,
+       &dev_attr_nr_resource.attr,
+       &dev_attr_nr_ss_cmp.attr,
+       &dev_attr_reset.attr,
+       &dev_attr_mode.attr,
+       &dev_attr_pe.attr,
+       &dev_attr_event.attr,
+       &dev_attr_event_instren.attr,
+       &dev_attr_event_ts.attr,
+       &dev_attr_syncfreq.attr,
+       &dev_attr_cyc_threshold.attr,
+       &dev_attr_bb_ctrl.attr,
+       &dev_attr_event_vinst.attr,
+       &dev_attr_s_exlevel_vinst.attr,
+       &dev_attr_ns_exlevel_vinst.attr,
+       &dev_attr_addr_idx.attr,
+       &dev_attr_addr_instdatatype.attr,
+       &dev_attr_addr_single.attr,
+       &dev_attr_addr_range.attr,
+       &dev_attr_addr_start.attr,
+       &dev_attr_addr_stop.attr,
+       &dev_attr_addr_ctxtype.attr,
+       &dev_attr_addr_context.attr,
+       &dev_attr_seq_idx.attr,
+       &dev_attr_seq_state.attr,
+       &dev_attr_seq_event.attr,
+       &dev_attr_seq_reset_event.attr,
+       &dev_attr_cntr_idx.attr,
+       &dev_attr_cntrldvr.attr,
+       &dev_attr_cntr_val.attr,
+       &dev_attr_cntr_ctrl.attr,
+       &dev_attr_res_idx.attr,
+       &dev_attr_res_ctrl.attr,
+       &dev_attr_ctxid_idx.attr,
+       &dev_attr_ctxid_pid.attr,
+       &dev_attr_ctxid_masks.attr,
+       &dev_attr_vmid_idx.attr,
+       &dev_attr_vmid_val.attr,
+       &dev_attr_vmid_masks.attr,
+       &dev_attr_cpu.attr,
+       NULL,
+};
+
+#define coresight_etm4x_simple_func(name, offset)                      \
+       coresight_simple_func(struct etmv4_drvdata, name, offset)
+
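coresight_simple_func(), defined in coresight-priv.h, stamps out a read-only show routine that dumps one management register by offset; its expansion is roughly the following (a sketch from memory, not verbatim):

#define coresight_simple_func(type, name, offset)			\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	type *drvdata = dev_get_drvdata(_dev->parent);			\
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
			 readl_relaxed(drvdata->base + offset));	\
}									\
static DEVICE_ATTR_RO(name)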
+coresight_etm4x_simple_func(trcoslsr, TRCOSLSR);
+coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
+coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
+coresight_etm4x_simple_func(trclsr, TRCLSR);
+coresight_etm4x_simple_func(trcconfig, TRCCONFIGR);
+coresight_etm4x_simple_func(trctraceid, TRCTRACEIDR);
+coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
+coresight_etm4x_simple_func(trcdevid, TRCDEVID);
+coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
+coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
+coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
+coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
+coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
+
+static struct attribute *coresight_etmv4_mgmt_attrs[] = {
+       &dev_attr_trcoslsr.attr,
+       &dev_attr_trcpdcr.attr,
+       &dev_attr_trcpdsr.attr,
+       &dev_attr_trclsr.attr,
+       &dev_attr_trcconfig.attr,
+       &dev_attr_trctraceid.attr,
+       &dev_attr_trcauthstatus.attr,
+       &dev_attr_trcdevid.attr,
+       &dev_attr_trcdevtype.attr,
+       &dev_attr_trcpidr0.attr,
+       &dev_attr_trcpidr1.attr,
+       &dev_attr_trcpidr2.attr,
+       &dev_attr_trcpidr3.attr,
+       NULL,
+};
+
+coresight_etm4x_simple_func(trcidr0, TRCIDR0);
+coresight_etm4x_simple_func(trcidr1, TRCIDR1);
+coresight_etm4x_simple_func(trcidr2, TRCIDR2);
+coresight_etm4x_simple_func(trcidr3, TRCIDR3);
+coresight_etm4x_simple_func(trcidr4, TRCIDR4);
+coresight_etm4x_simple_func(trcidr5, TRCIDR5);
+/* trcidr[6,7] are reserved */
+coresight_etm4x_simple_func(trcidr8, TRCIDR8);
+coresight_etm4x_simple_func(trcidr9, TRCIDR9);
+coresight_etm4x_simple_func(trcidr10, TRCIDR10);
+coresight_etm4x_simple_func(trcidr11, TRCIDR11);
+coresight_etm4x_simple_func(trcidr12, TRCIDR12);
+coresight_etm4x_simple_func(trcidr13, TRCIDR13);
+
+static struct attribute *coresight_etmv4_trcidr_attrs[] = {
+       &dev_attr_trcidr0.attr,
+       &dev_attr_trcidr1.attr,
+       &dev_attr_trcidr2.attr,
+       &dev_attr_trcidr3.attr,
+       &dev_attr_trcidr4.attr,
+       &dev_attr_trcidr5.attr,
+       /* trcidr[6,7] are reserved */
+       &dev_attr_trcidr8.attr,
+       &dev_attr_trcidr9.attr,
+       &dev_attr_trcidr10.attr,
+       &dev_attr_trcidr11.attr,
+       &dev_attr_trcidr12.attr,
+       &dev_attr_trcidr13.attr,
+       NULL,
+};
+
+static const struct attribute_group coresight_etmv4_group = {
+       .attrs = coresight_etmv4_attrs,
+};
+
+static const struct attribute_group coresight_etmv4_mgmt_group = {
+       .attrs = coresight_etmv4_mgmt_attrs,
+       .name = "mgmt",
+};
+
+static const struct attribute_group coresight_etmv4_trcidr_group = {
+       .attrs = coresight_etmv4_trcidr_attrs,
+       .name = "trcidr",
+};
+
+const struct attribute_group *coresight_etmv4_groups[] = {
+       &coresight_etmv4_group,
+       &coresight_etmv4_mgmt_group,
+       &coresight_etmv4_trcidr_group,
+       NULL,
+};
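Because the mgmt and trcidr groups carry a .name, sysfs exposes their attributes in like-named subdirectories under the device node, next to the top-level configuration attributes from the first group. A user-space sketch reading one of them (the device path is an example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32] = "";
	int fd = open("/sys/bus/coresight/devices/20040000.etm/trcidr/trcidr0",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("TRCIDR0: %s", buf);
	close(fd);
	return 0;
}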
index a6707642bb238a68db73aec536ec4e8de6d92e39..462f0dc1575751a01d3778acbbaa30518a1b1dc0 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/clk.h>
 #include <linux/cpu.h>
 #include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
 #include <linux/pm_wakeup.h>
 #include <linux/amba/bus.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 #include <linux/pm_runtime.h>
 #include <asm/sections.h>
+#include <asm/local.h>
 
 #include "coresight-etm4x.h"
+#include "coresight-etm-perf.h"
 
 static int boot_enable;
 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
@@ -42,13 +46,13 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 /* The number of ETMv4 currently registered */
 static int etm4_count;
 static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
+static void etm4_set_default(struct etmv4_config *config);
 
-static void etm4_os_unlock(void *info)
+static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
 {
-       struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
-
        /* Writing any value to TRCOSLAR unlocks the trace registers */
        writel_relaxed(0x0, drvdata->base + TRCOSLAR);
+       drvdata->os_unlock = true;
        isb();
 }
 
@@ -63,16 +67,22 @@ static bool etm4_arch_supported(u8 arch)
        return true;
 }
 
+static int etm4_cpu_id(struct coresight_device *csdev)
+{
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       return drvdata->cpu;
+}
+
 static int etm4_trace_id(struct coresight_device *csdev)
 {
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        unsigned long flags;
        int trace_id = -1;
 
-       if (!drvdata->enable)
+       if (!local_read(&drvdata->mode))
                return drvdata->trcid;
 
-       pm_runtime_get_sync(drvdata->dev);
        spin_lock_irqsave(&drvdata->spinlock, flags);
 
        CS_UNLOCK(drvdata->base);
@@ -81,7 +91,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
        CS_LOCK(drvdata->base);
 
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(drvdata->dev);
 
        return trace_id;
 }
@@ -90,6 +99,7 @@ static void etm4_enable_hw(void *info)
 {
        int i;
        struct etmv4_drvdata *drvdata = info;
+       struct etmv4_config *config = &drvdata->config;
 
        CS_UNLOCK(drvdata->base);
 
@@ -104,2200 +114,288 @@ static void etm4_enable_hw(void *info)
                        "timeout observed when probing at offset %#x\n",
                        TRCSTATR);
 
-       writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
-       writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
+       writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
+       writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
        /* nothing specific implemented */
        writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
-       writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
-       writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
-       writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
-       writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
-       writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
-       writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
-       writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
+       writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
+       writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
+       writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
+       writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
+       writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
+       writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
+       writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
        writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
-       writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
-       writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
-       writel_relaxed(drvdata->vissctlr,
+       writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
+       writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
+       writel_relaxed(config->vissctlr,
                       drvdata->base + TRCVISSCTLR);
-       writel_relaxed(drvdata->vipcssctlr,
+       writel_relaxed(config->vipcssctlr,
                       drvdata->base + TRCVIPCSSCTLR);
        for (i = 0; i < drvdata->nrseqstate - 1; i++)
-               writel_relaxed(drvdata->seq_ctrl[i],
+               writel_relaxed(config->seq_ctrl[i],
                               drvdata->base + TRCSEQEVRn(i));
-       writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
-       writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
-       writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
+       writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
+       writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
+       writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
        for (i = 0; i < drvdata->nr_cntr; i++) {
-               writel_relaxed(drvdata->cntrldvr[i],
+               writel_relaxed(config->cntrldvr[i],
                               drvdata->base + TRCCNTRLDVRn(i));
-               writel_relaxed(drvdata->cntr_ctrl[i],
+               writel_relaxed(config->cntr_ctrl[i],
                               drvdata->base + TRCCNTCTLRn(i));
-               writel_relaxed(drvdata->cntr_val[i],
+               writel_relaxed(config->cntr_val[i],
                               drvdata->base + TRCCNTVRn(i));
        }
 
        /* Resource selector pair 0 is always implemented and reserved */
-       for (i = 2; i < drvdata->nr_resource * 2; i++)
-               writel_relaxed(drvdata->res_ctrl[i],
-                              drvdata->base + TRCRSCTLRn(i));
-
-       for (i = 0; i < drvdata->nr_ss_cmp; i++) {
-               writel_relaxed(drvdata->ss_ctrl[i],
-                              drvdata->base + TRCSSCCRn(i));
-               writel_relaxed(drvdata->ss_status[i],
-                              drvdata->base + TRCSSCSRn(i));
-               writel_relaxed(drvdata->ss_pe_cmp[i],
-                              drvdata->base + TRCSSPCICRn(i));
-       }
-       for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-               writeq_relaxed(drvdata->addr_val[i],
-                              drvdata->base + TRCACVRn(i));
-               writeq_relaxed(drvdata->addr_acc[i],
-                              drvdata->base + TRCACATRn(i));
-       }
-       for (i = 0; i < drvdata->numcidc; i++)
-               writeq_relaxed(drvdata->ctxid_pid[i],
-                              drvdata->base + TRCCIDCVRn(i));
-       writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
-       writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
-
-       for (i = 0; i < drvdata->numvmidc; i++)
-               writeq_relaxed(drvdata->vmid_val[i],
-                              drvdata->base + TRCVMIDCVRn(i));
-       writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
-       writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
-
-       /* Enable the trace unit */
-       writel_relaxed(1, drvdata->base + TRCPRGCTLR);
-
-       /* wait for TRCSTATR.IDLE to go back down to '0' */
-       if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
-               dev_err(drvdata->dev,
-                       "timeout observed when probing at offset %#x\n",
-                       TRCSTATR);
-
-       CS_LOCK(drvdata->base);
-
-       dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
-}
-
-static int etm4_enable(struct coresight_device *csdev)
-{
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-       int ret;
-
-       pm_runtime_get_sync(drvdata->dev);
-       spin_lock(&drvdata->spinlock);
-
-       /*
-        * Executing etm4_enable_hw on the cpu whose ETM is being enabled
-        * ensures that register writes occur when cpu is powered.
-        */
-       ret = smp_call_function_single(drvdata->cpu,
-                                      etm4_enable_hw, drvdata, 1);
-       if (ret)
-               goto err;
-       drvdata->enable = true;
-       drvdata->sticky_enable = true;
-
-       spin_unlock(&drvdata->spinlock);
-
-       dev_info(drvdata->dev, "ETM tracing enabled\n");
-       return 0;
-err:
-       spin_unlock(&drvdata->spinlock);
-       pm_runtime_put(drvdata->dev);
-       return ret;
-}
-
-static void etm4_disable_hw(void *info)
-{
-       u32 control;
-       struct etmv4_drvdata *drvdata = info;
-
-       CS_UNLOCK(drvdata->base);
-
-       control = readl_relaxed(drvdata->base + TRCPRGCTLR);
-
-       /* EN, bit[0] Trace unit enable bit */
-       control &= ~0x1;
-
-       /* make sure everything completes before disabling */
-       mb();
-       isb();
-       writel_relaxed(control, drvdata->base + TRCPRGCTLR);
-
-       CS_LOCK(drvdata->base);
-
-       dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
-}
-
-static void etm4_disable(struct coresight_device *csdev)
-{
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
-       /*
-        * Taking hotplug lock here protects from clocks getting disabled
-        * with tracing being left on (crash scenario) if user disable occurs
-        * after cpu online mask indicates the cpu is offline but before the
-        * DYING hotplug callback is serviced by the ETM driver.
-        */
-       get_online_cpus();
-       spin_lock(&drvdata->spinlock);
-
-       /*
-        * Executing etm4_disable_hw on the cpu whose ETM is being disabled
-        * ensures that register writes occur when cpu is powered.
-        */
-       smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
-       drvdata->enable = false;
-
-       spin_unlock(&drvdata->spinlock);
-       put_online_cpus();
-
-       pm_runtime_put(drvdata->dev);
-
-       dev_info(drvdata->dev, "ETM tracing disabled\n");
-}
-
-static const struct coresight_ops_source etm4_source_ops = {
-       .trace_id       = etm4_trace_id,
-       .enable         = etm4_enable,
-       .disable        = etm4_disable,
-};
-
-static const struct coresight_ops etm4_cs_ops = {
-       .source_ops     = &etm4_source_ops,
-};
-
-static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
-{
-       u8 idx = drvdata->addr_idx;
-
-       /*
-        * TRCACATRn.TYPE bit[1:0]: type of comparison
-        * the trace unit performs
-        */
-       if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
-               if (idx % 2 != 0)
-                       return -EINVAL;
-
-               /*
-                * We are performing instruction address comparison. Set the
-                * relevant bit of ViewInst Include/Exclude Control register
-                * for corresponding address comparator pair.
-                */
-               if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
-                   drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
-                       return -EINVAL;
-
-               if (exclude == true) {
-                       /*
-                        * Set exclude bit and unset the include bit
-                        * corresponding to comparator pair
-                        */
-                       drvdata->viiectlr |= BIT(idx / 2 + 16);
-                       drvdata->viiectlr &= ~BIT(idx / 2);
-               } else {
-                       /*
-                        * Set include bit and unset exclude bit
-                        * corresponding to comparator pair
-                        */
-                       drvdata->viiectlr |= BIT(idx / 2);
-                       drvdata->viiectlr &= ~BIT(idx / 2 + 16);
-               }
-       }
-       return 0;
-}
-
-static ssize_t nr_pe_cmp_show(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_pe_cmp;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_pe_cmp);
-
-static ssize_t nr_addr_cmp_show(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_addr_cmp;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_addr_cmp);
-
-static ssize_t nr_cntr_show(struct device *dev,
-                           struct device_attribute *attr,
-                           char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_cntr;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_cntr);
-
-static ssize_t nr_ext_inp_show(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_ext_inp;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ext_inp);
-
-static ssize_t numcidc_show(struct device *dev,
-                           struct device_attribute *attr,
-                           char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->numcidc;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(numcidc);
-
-static ssize_t numvmidc_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->numvmidc;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(numvmidc);
-
-static ssize_t nrseqstate_show(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nrseqstate;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nrseqstate);
-
-static ssize_t nr_resource_show(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_resource;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_resource);
-
-static ssize_t nr_ss_cmp_show(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_ss_cmp;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ss_cmp);
-
-static ssize_t reset_store(struct device *dev,
-                          struct device_attribute *attr,
-                          const char *buf, size_t size)
-{
-       int i;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       if (val)
-               drvdata->mode = 0x0;
-
-       /* Disable data tracing: do not trace load and store data transfers */
-       drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
-       drvdata->cfg &= ~(BIT(1) | BIT(2));
-
-       /* Disable data value and data address tracing */
-       drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
-                          ETM_MODE_DATA_TRACE_VAL);
-       drvdata->cfg &= ~(BIT(16) | BIT(17));
-
-       /* Disable all events tracing */
-       drvdata->eventctrl0 = 0x0;
-       drvdata->eventctrl1 = 0x0;
-
-       /* Disable timestamp event */
-       drvdata->ts_ctrl = 0x0;
-
-       /* Disable stalling */
-       drvdata->stall_ctrl = 0x0;
-
-       /* Reset trace synchronization period to 2^8 = 256 bytes */
-       if (!drvdata->syncpr)
-               drvdata->syncfreq = 0x8;
-
-       /*
-        * Enable ViewInst to trace everything with start-stop logic in
-        * started state. ARM recommends that the start-stop logic be set
-        * each trace run.
-        */
-       drvdata->vinst_ctrl |= BIT(0);
-       if (drvdata->nr_addr_cmp > 0) {
-               drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
-               /* SSSTATUS, bit[9] */
-               drvdata->vinst_ctrl |= BIT(9);
-       }
-
-       /* No address range filtering for ViewInst */
-       drvdata->viiectlr = 0x0;
-
-       /* No start-stop filtering for ViewInst */
-       drvdata->vissctlr = 0x0;
-
-       /* Disable seq events */
-       for (i = 0; i < drvdata->nrseqstate-1; i++)
-               drvdata->seq_ctrl[i] = 0x0;
-       drvdata->seq_rst = 0x0;
-       drvdata->seq_state = 0x0;
-
-       /* Disable external input events */
-       drvdata->ext_inp = 0x0;
-
-       drvdata->cntr_idx = 0x0;
-       for (i = 0; i < drvdata->nr_cntr; i++) {
-               drvdata->cntrldvr[i] = 0x0;
-               drvdata->cntr_ctrl[i] = 0x0;
-               drvdata->cntr_val[i] = 0x0;
-       }
-
-       /* Resource selector pair 0 is always implemented and reserved */
-       drvdata->res_idx = 0x2;
-       for (i = 2; i < drvdata->nr_resource * 2; i++)
-               drvdata->res_ctrl[i] = 0x0;
-
-       for (i = 0; i < drvdata->nr_ss_cmp; i++) {
-               drvdata->ss_ctrl[i] = 0x0;
-               drvdata->ss_pe_cmp[i] = 0x0;
-       }
-
-       drvdata->addr_idx = 0x0;
-       for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
-               drvdata->addr_val[i] = 0x0;
-               drvdata->addr_acc[i] = 0x0;
-               drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
-       }
-
-       drvdata->ctxid_idx = 0x0;
-       for (i = 0; i < drvdata->numcidc; i++) {
-               drvdata->ctxid_pid[i] = 0x0;
-               drvdata->ctxid_vpid[i] = 0x0;
-       }
-
-       drvdata->ctxid_mask0 = 0x0;
-       drvdata->ctxid_mask1 = 0x0;
-
-       drvdata->vmid_idx = 0x0;
-       for (i = 0; i < drvdata->numvmidc; i++)
-               drvdata->vmid_val[i] = 0x0;
-       drvdata->vmid_mask0 = 0x0;
-       drvdata->vmid_mask1 = 0x0;
-
-       drvdata->trcid = drvdata->cpu + 1;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_WO(reset);
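
A minimal user-space sketch of exercising this write-only attribute follows; the device name "etm0" is an assumption, since CoreSight device names vary per platform.

/* Hedged sketch: write the ETMv4 "reset" attribute from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical path; list /sys/bus/coresight/devices/ to find yours. */
        const char *path = "/sys/bus/coresight/devices/etm0/reset";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Any non-zero hex value clears drvdata->mode before defaults apply. */
        if (write(fd, "1", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}
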
-
-static ssize_t mode_show(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->mode;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t mode_store(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf, size_t size)
-{
-       unsigned long val, mode;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->mode = val & ETMv4_MODE_ALL;
-
-       if (drvdata->mode & ETM_MODE_EXCLUDE)
-               etm4_set_mode_exclude(drvdata, true);
-       else
-               etm4_set_mode_exclude(drvdata, false);
-
-       if (drvdata->instrp0) {
-               /* start by clearing instruction P0 field */
-               drvdata->cfg  &= ~(BIT(1) | BIT(2));
-               if (drvdata->mode & ETM_MODE_LOAD)
-                       /* 0b01 Trace load instructions as P0 instructions */
-                       drvdata->cfg  |= BIT(1);
-               if (drvdata->mode & ETM_MODE_STORE)
-                       /* 0b10 Trace store instructions as P0 instructions */
-                       drvdata->cfg  |= BIT(2);
-               if (drvdata->mode & ETM_MODE_LOAD_STORE)
-                       /*
-                        * 0b11 Trace load and store instructions
-                        * as P0 instructions
-                        */
-                       drvdata->cfg  |= BIT(1) | BIT(2);
-       }
-
-       /* bit[3], Branch broadcast mode */
-       if ((drvdata->mode & ETM_MODE_BB) && drvdata->trcbb)
-               drvdata->cfg |= BIT(3);
-       else
-               drvdata->cfg &= ~BIT(3);
-
-       /* bit[4], Cycle counting instruction trace bit */
-       if ((drvdata->mode & ETMv4_MODE_CYCACC) && drvdata->trccci)
-               drvdata->cfg |= BIT(4);
-       else
-               drvdata->cfg &= ~BIT(4);
-
-       /* bit[6], Context ID tracing bit */
-       if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
-               drvdata->cfg |= BIT(6);
-       else
-               drvdata->cfg &= ~BIT(6);
-
-       /* bit[7], Virtual context identifier tracing bit */
-       if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
-               drvdata->cfg |= BIT(7);
-       else
-               drvdata->cfg &= ~BIT(7);
-
-       /* bits[10:8], Conditional instruction tracing field */
-       mode = ETM_MODE_COND(drvdata->mode);
-       if (drvdata->trccond) {
-               drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
-               drvdata->cfg |= mode << 8;
-       }
-
-       /* bit[11], Global timestamp tracing bit */
-       if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
-               drvdata->cfg |= BIT(11);
-       else
-               drvdata->cfg &= ~BIT(11);
-
-       /* bit[12], Return stack enable bit */
-       if ((drvdata->mode & ETM_MODE_RETURNSTACK) && drvdata->retstack)
-               drvdata->cfg |= BIT(12);
-       else
-               drvdata->cfg &= ~BIT(12);
-
-       /* bits[14:13], Q element enable field */
-       mode = ETM_MODE_QELEM(drvdata->mode);
-       /* start by clearing QE bits */
-       drvdata->cfg &= ~(BIT(13) | BIT(14));
-       /* if supported, Q elements with instruction counts are enabled */
-       if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
-               drvdata->cfg |= BIT(13);
-       /*
-        * if supported, Q elements with and without instruction
-        * counts are enabled
-        */
-       if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
-               drvdata->cfg |= BIT(14);
-
-       /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
-       if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) && drvdata->atbtrig)
-               drvdata->eventctrl1 |= BIT(11);
-       else
-               drvdata->eventctrl1 &= ~BIT(11);
-
-       /* bit[12], Low-power state behavior override bit */
-       if ((drvdata->mode & ETM_MODE_LPOVERRIDE) && drvdata->lpoverride)
-               drvdata->eventctrl1 |= BIT(12);
-       else
-               drvdata->eventctrl1 &= ~BIT(12);
-
-       /* bit[8], Instruction stall bit */
-       if (drvdata->mode & ETM_MODE_ISTALL_EN)
-               drvdata->stall_ctrl |= BIT(8);
-       else
-               drvdata->stall_ctrl &= ~BIT(8);
-
-       /* bit[10], Prioritize instruction trace bit */
-       if (drvdata->mode & ETM_MODE_INSTPRIO)
-               drvdata->stall_ctrl |= BIT(10);
-       else
-               drvdata->stall_ctrl &= ~BIT(10);
-
-       /* bit[13], Trace overflow prevention bit */
-       if ((drvdata->mode & ETM_MODE_NOOVERFLOW) && drvdata->nooverflow)
-               drvdata->stall_ctrl |= BIT(13);
-       else
-               drvdata->stall_ctrl &= ~BIT(13);
-
-       /* bit[9] Start/stop logic control bit */
-       if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
-               drvdata->vinst_ctrl |= BIT(9);
-       else
-               drvdata->vinst_ctrl &= ~BIT(9);
-
-       /* bit[10], Whether a trace unit must trace a Reset exception */
-       if (drvdata->mode & ETM_MODE_TRACE_RESET)
-               drvdata->vinst_ctrl |= BIT(10);
-       else
-               drvdata->vinst_ctrl &= ~BIT(10);
-
-       /* bit[11], Whether a trace unit must trace a system error exception */
-       if ((drvdata->mode & ETM_MODE_TRACE_ERR) && drvdata->trc_error)
-               drvdata->vinst_ctrl |= BIT(11);
-       else
-               drvdata->vinst_ctrl &= ~BIT(11);
-
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(mode);
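
The store handler above folds several independently gated features into TRCCONFIGR. A condensed sketch of the P0, branch-broadcast and cycle-count portion; the SK_-prefixed flag values are assumptions standing in for the driver's ETM_MODE_* constants, for illustration only.

/* Hedged sketch of the mode -> TRCCONFIGR translation above. */
#define SK_MODE_LOAD    (1u << 0)       /* assumed flag values */
#define SK_MODE_STORE   (1u << 1)
#define SK_MODE_BB      (1u << 2)
#define SK_MODE_CYCACC  (1u << 3)

static unsigned int sk_mode_to_cfg(unsigned int mode, int trcbb, int trccci)
{
        unsigned int cfg = 0;

        if (mode & SK_MODE_LOAD)                /* bit[1]: loads as P0 */
                cfg |= 1u << 1;
        if (mode & SK_MODE_STORE)               /* bit[2]: stores as P0 */
                cfg |= 1u << 2;
        if ((mode & SK_MODE_BB) && trcbb)       /* bit[3]: branch broadcast */
                cfg |= 1u << 3;
        if ((mode & SK_MODE_CYCACC) && trccci)  /* bit[4]: cycle counting */
                cfg |= 1u << 4;
        return cfg;
}
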
-
-static ssize_t pe_show(struct device *dev,
-                      struct device_attribute *attr,
-                      char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->pe_sel;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t pe_store(struct device *dev,
-                       struct device_attribute *attr,
-                       const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       if (val > drvdata->nr_pe) {
-               spin_unlock(&drvdata->spinlock);
-               return -EINVAL;
-       }
-
-       drvdata->pe_sel = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(pe);
-
-static ssize_t event_show(struct device *dev,
-                         struct device_attribute *attr,
-                         char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->eventctrl0;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t event_store(struct device *dev,
-                          struct device_attribute *attr,
-                          const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       switch (drvdata->nr_event) {
-       case 0x0:
-               /* EVENT0, bits[7:0] */
-               drvdata->eventctrl0 = val & 0xFF;
-               break;
-       case 0x1:
-               /* EVENT1, bits[15:8] */
-               drvdata->eventctrl0 = val & 0xFFFF;
-               break;
-       case 0x2:
-               /* EVENT2, bits[23:16] */
-               drvdata->eventctrl0 = val & 0xFFFFFF;
-               break;
-       case 0x3:
-               /* EVENT3, bits[31:24] */
-               drvdata->eventctrl0 = val;
-               break;
-       default:
-               break;
-       }
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(event);
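
The switch above retains one extra byte of the written value per implemented EVENT field; equivalently, as a sketch (the helper name is ours):

/* Hedged sketch: mask retained by event_store for nr_event = 0..3. */
static unsigned long sk_event_mask(unsigned int nr_event, unsigned long val)
{
        unsigned int bits = (nr_event + 1) * 8; /* one byte per EVENT field */

        return bits >= 32 ? val : (val & ((1UL << bits) - 1));
}
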
-
-static ssize_t event_instren_show(struct device *dev,
-                                 struct device_attribute *attr,
-                                 char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = BMVAL(drvdata->eventctrl1, 0, 3);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t event_instren_store(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       /* start by clearing all instruction event enable bits */
-       drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
-       switch (drvdata->nr_event) {
-       case 0x0:
-               /* generate Event element for event 1 */
-               drvdata->eventctrl1 |= val & BIT(1);
-               break;
-       case 0x1:
-               /* generate Event element for event 1 and 2 */
-               drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
-               break;
-       case 0x2:
-               /* generate Event element for event 1, 2 and 3 */
-               drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
-               break;
-       case 0x3:
-               /* generate Event element for all 4 events */
-               drvdata->eventctrl1 |= val & 0xF;
-               break;
-       default:
-               break;
-       }
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(event_instren);
-
-static ssize_t event_ts_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->ts_ctrl;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t event_ts_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (!drvdata->ts_size)
-               return -EINVAL;
-
-       drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(event_ts);
-
-static ssize_t syncfreq_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->syncfreq;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t syncfreq_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (drvdata->syncpr)
-               return -EINVAL;
-
-       drvdata->syncfreq = val & ETMv4_SYNC_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(syncfreq);
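
TRCSYNCPR.PERIOD encodes a power-of-two byte count, which is why reset_store picks 0x8 for 256 bytes; as a sketch (helper name ours):

/* Hedged sketch: bytes between trace synchronization packets. */
static unsigned long long sk_sync_period_bytes(unsigned int period)
{
        return 1ULL << period;  /* e.g. period = 0x8 -> 256 bytes */
}
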
-
-static ssize_t cyc_threshold_show(struct device *dev,
-                                 struct device_attribute *attr,
-                                 char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->ccctlr;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t cyc_threshold_store(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val < drvdata->ccitmin)
-               return -EINVAL;
-
-       drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(cyc_threshold);
-
-static ssize_t bb_ctrl_show(struct device *dev,
-                           struct device_attribute *attr,
-                           char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->bb_ctrl;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t bb_ctrl_store(struct device *dev,
-                            struct device_attribute *attr,
-                            const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (!drvdata->trcbb)
-               return -EINVAL;
-       if (!drvdata->nr_addr_cmp)
-               return -EINVAL;
-       /*
-        * Bit[7:0] selects which address range comparator is used for
-        * branch broadcast control.
-        */
-       if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
-               return -EINVAL;
-
-       drvdata->bb_ctrl = val;
-       return size;
-}
-static DEVICE_ATTR_RW(bb_ctrl);
-
-static ssize_t event_vinst_show(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t event_vinst_store(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       val &= ETMv4_EVENT_MASK;
-       drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
-       drvdata->vinst_ctrl |= val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(event_vinst);
-
-static ssize_t s_exlevel_vinst_show(struct device *dev,
-                                   struct device_attribute *attr,
-                                   char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = BMVAL(drvdata->vinst_ctrl, 16, 19);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t s_exlevel_vinst_store(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
-       drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
-       /* enable instruction tracing for corresponding exception level */
-       val &= drvdata->s_ex_level;
-       drvdata->vinst_ctrl |= (val << 16);
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(s_exlevel_vinst);
-
-static ssize_t ns_exlevel_vinst_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       /* EXLEVEL_NS, bits[23:20] */
-       val = BMVAL(drvdata->vinst_ctrl, 20, 23);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t ns_exlevel_vinst_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       /* clear EXLEVEL_NS bits (bit[23] is never implemented) */
-       drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
-       /* enable instruction tracing for corresponding exception level */
-       val &= drvdata->ns_ex_level;
-       drvdata->vinst_ctrl |= (val << 20);
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(ns_exlevel_vinst);
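
Both exception-level handlers place a 4-bit mask into TRCVICTLR; a sketch of the packing implied by the field comments above (helper name ours; the store handlers additionally AND with the implemented-level mask first):

/* Hedged sketch: EXLEVEL_S bits[19:16] and EXLEVEL_NS bits[23:20]. */
static unsigned int sk_exlevel_fields(unsigned int s_mask, unsigned int ns_mask)
{
        return ((s_mask & 0xF) << 16) | ((ns_mask & 0xF) << 20);
}
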
-
-static ssize_t addr_idx_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->addr_idx;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_idx_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val >= drvdata->nr_addr_cmp * 2)
-               return -EINVAL;
-
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->addr_idx = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(addr_idx);
-
-static ssize_t addr_instdatatype_show(struct device *dev,
-                                     struct device_attribute *attr,
-                                     char *buf)
-{
-       ssize_t len;
-       u8 val, idx;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       val = BMVAL(drvdata->addr_acc[idx], 0, 1);
-       len = scnprintf(buf, PAGE_SIZE, "%s\n",
-                       val == ETM_INSTR_ADDR ? "instr" :
-                       (val == ETM_DATA_LOAD_ADDR ? "data_load" :
-                       (val == ETM_DATA_STORE_ADDR ? "data_store" :
-                       "data_load_store")));
-       spin_unlock(&drvdata->spinlock);
-       return len;
-}
-
-static ssize_t addr_instdatatype_store(struct device *dev,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t size)
-{
-       u8 idx;
-       char str[20] = "";
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (strlen(buf) >= 20)
-               return -EINVAL;
-       if (sscanf(buf, "%s", str) != 1)
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!strcmp(str, "instr"))
-               /* TYPE, bits[1:0] */
-               drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
-
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(addr_instdatatype);
-
-static ssize_t addr_single_show(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       /* take the lock before sampling the index, as the other handlers do */
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-       val = (unsigned long)drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_single_store(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = (u64)val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(addr_single);
-
-static ssize_t addr_range_show(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       u8 idx;
-       unsigned long val1, val2;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (idx % 2 != 0) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-       if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-             (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val1 = (unsigned long)drvdata->addr_val[idx];
-       val2 = (unsigned long)drvdata->addr_val[idx + 1];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
-}
-
-static ssize_t addr_range_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val1, val2;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
-               return -EINVAL;
-       /* lower address comparator cannot have a higher address value */
-       if (val1 > val2)
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (idx % 2 != 0) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-             (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = (u64)val1;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
-       drvdata->addr_val[idx + 1] = (u64)val2;
-       drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
-       /*
-        * Program include or exclude control bits for vinst or vdata
-        * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
-        */
-       if (drvdata->mode & ETM_MODE_EXCLUDE)
-               etm4_set_mode_exclude(drvdata, true);
-       else
-               etm4_set_mode_exclude(drvdata, false);
-
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(addr_range);
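
Address-range comparators consume an even/odd pair, which is what the two -EPERM paths above enforce; the preconditions reduce to this sketch (helper name ours):

/* Hedged sketch: preconditions enforced by addr_range_store. */
static int sk_range_valid(unsigned int idx, unsigned long start,
                          unsigned long end)
{
        if (idx % 2 != 0)       /* idx must select the low half of a pair */
                return 0;
        return start <= end;    /* lower comparator holds the lower address */
}
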
-
-static ssize_t addr_start_show(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val = (unsigned long)drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_start_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!drvdata->nr_addr_cmp) {
-               spin_unlock(&drvdata->spinlock);
-               return -EINVAL;
-       }
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = (u64)val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
-       drvdata->vissctlr |= BIT(idx);
-       /* SSSTATUS, bit[9] - turn on start/stop logic */
-       drvdata->vinst_ctrl |= BIT(9);
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(addr_start);
-
-static ssize_t addr_stop_show(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val = (unsigned long)drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_stop_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!drvdata->nr_addr_cmp) {
-               spin_unlock(&drvdata->spinlock);
-               return -EINVAL;
-       }
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-              drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = (u64)val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
-       drvdata->vissctlr |= BIT(idx + 16);
-       /* SSSTATUS, bit[9] - turn on start/stop logic */
-       drvdata->vinst_ctrl |= BIT(9);
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(addr_stop);
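
addr_start and addr_stop land in the two halves of TRCVISSCTLR; a sketch of that layout (helper name and mask arguments are illustrative):

/* Hedged sketch: start comparators arm bits[15:0], stop bits[31:16]. */
static unsigned int sk_vissctlr(unsigned int start_mask, unsigned int stop_mask)
{
        return (start_mask & 0xFFFF) | ((stop_mask & 0xFFFF) << 16);
}
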
-
-static ssize_t addr_ctxtype_show(struct device *dev,
-                                struct device_attribute *attr,
-                                char *buf)
-{
-       ssize_t len;
-       u8 idx, val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       /* CONTEXTTYPE, bits[3:2] */
-       val = BMVAL(drvdata->addr_acc[idx], 2, 3);
-       len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
-                       (val == ETM_CTX_CTXID ? "ctxid" :
-                       (val == ETM_CTX_VMID ? "vmid" : "all")));
-       spin_unlock(&drvdata->spinlock);
-       return len;
-}
-
-static ssize_t addr_ctxtype_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       u8 idx;
-       char str[10] = "";
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (strlen(buf) >= 10)
-               return -EINVAL;
-       if (sscanf(buf, "%s", str) != 1)
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!strcmp(str, "none"))
-               /* start by clearing context type bits */
-               drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
-       else if (!strcmp(str, "ctxid")) {
-               /* 0b01 The trace unit performs a Context ID comparison */
-               if (drvdata->numcidc) {
-                       drvdata->addr_acc[idx] |= BIT(2);
-                       drvdata->addr_acc[idx] &= ~BIT(3);
-               }
-       } else if (!strcmp(str, "vmid")) {
-               /* 0b10 The trace unit performs a VMID comparison */
-               if (drvdata->numvmidc) {
-                       drvdata->addr_acc[idx] &= ~BIT(2);
-                       drvdata->addr_acc[idx] |= BIT(3);
-               }
-       } else if (!strcmp(str, "all")) {
-               /*
-                * 0b11 The trace unit performs a Context ID
-                * comparison and a VMID comparison
-                */
-               if (drvdata->numcidc)
-                       drvdata->addr_acc[idx] |= BIT(2);
-               if (drvdata->numvmidc)
-                       drvdata->addr_acc[idx] |= BIT(3);
-       }
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(addr_ctxtype);
-
-static ssize_t addr_context_show(struct device *dev,
-                                struct device_attribute *attr,
-                                char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       /* context ID comparator bits[6:4] */
-       val = BMVAL(drvdata->addr_acc[idx], 4, 6);
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_context_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
-               return -EINVAL;
-       if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
-                   drvdata->numcidc : drvdata->numvmidc))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       /* clear context ID comparator bits[6:4] */
-       drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
-       drvdata->addr_acc[idx] |= (val << 4);
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(addr_context);
-
-static ssize_t seq_idx_show(struct device *dev,
-                           struct device_attribute *attr,
-                           char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->seq_idx;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t seq_idx_store(struct device *dev,
-                            struct device_attribute *attr,
-                            const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val >= drvdata->nrseqstate - 1)
-               return -EINVAL;
-
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->seq_idx = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(seq_idx);
-
-static ssize_t seq_state_show(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->seq_state;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t seq_state_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val >= drvdata->nrseqstate)
-               return -EINVAL;
-
-       drvdata->seq_state = val;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_state);
-
-static ssize_t seq_event_show(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->seq_idx;
-       val = drvdata->seq_ctrl[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t seq_event_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->seq_idx;
-       /* RST, bits[7:0] */
-       drvdata->seq_ctrl[idx] = val & 0xFF;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(seq_event);
-
-static ssize_t seq_reset_event_show(struct device *dev,
-                                   struct device_attribute *attr,
-                                   char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->seq_rst;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t seq_reset_event_store(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (!drvdata->nrseqstate)
-               return -EINVAL;
-
-       drvdata->seq_rst = val & ETMv4_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_reset_event);
-
-static ssize_t cntr_idx_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->cntr_idx;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t cntr_idx_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val >= drvdata->nr_cntr)
-               return -EINVAL;
-
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_idx = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_idx);
-
-static ssize_t cntrldvr_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->cntr_idx;
-       val = drvdata->cntrldvr[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t cntrldvr_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val > ETM_CNTR_MAX_VAL)
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->cntr_idx;
-       drvdata->cntrldvr[idx] = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(cntrldvr);
-
-static ssize_t cntr_val_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       for (i = 0; i < drvdata->nr_resource * 2; i++)
+               writel_relaxed(config->res_ctrl[i],
+                              drvdata->base + TRCRSCTLRn(i));
 
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->cntr_idx;
-       val = drvdata->cntr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
+       for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+               writel_relaxed(config->ss_ctrl[i],
+                              drvdata->base + TRCSSCCRn(i));
+               writel_relaxed(config->ss_status[i],
+                              drvdata->base + TRCSSCSRn(i));
+               writel_relaxed(config->ss_pe_cmp[i],
+                              drvdata->base + TRCSSPCICRn(i));
+       }
+       for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+               writeq_relaxed(config->addr_val[i],
+                              drvdata->base + TRCACVRn(i));
+               writeq_relaxed(config->addr_acc[i],
+                              drvdata->base + TRCACATRn(i));
+       }
+       for (i = 0; i < drvdata->numcidc; i++)
+               writeq_relaxed(config->ctxid_pid[i],
+                              drvdata->base + TRCCIDCVRn(i));
+       writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
+       writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
 
-static ssize_t cntr_val_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       for (i = 0; i < drvdata->numvmidc; i++)
+               writeq_relaxed(config->vmid_val[i],
+                              drvdata->base + TRCVMIDCVRn(i));
+       writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
+       writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
 
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val > ETM_CNTR_MAX_VAL)
-               return -EINVAL;
+       /* Enable the trace unit */
+       writel_relaxed(1, drvdata->base + TRCPRGCTLR);
 
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->cntr_idx;
-       drvdata->cntr_val[idx] = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_val);
+       /* wait for TRCSTATR.IDLE to go back down to '0' */
+       if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
+               dev_err(drvdata->dev,
+                       "timeout observed when probing at offset %#x\n",
+                       TRCSTATR);
 
-static ssize_t cntr_ctrl_show(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       CS_LOCK(drvdata->base);
 
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->cntr_idx;
-       val = drvdata->cntr_ctrl[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+       dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 }
 
-static ssize_t cntr_ctrl_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
+static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
+                                  struct perf_event_attr *attr)
 {
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_config *config = &drvdata->config;
 
-       if (kstrtoul(buf, 16, &val))
+       if (!attr)
                return -EINVAL;
 
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->cntr_idx;
-       drvdata->cntr_ctrl[idx] = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_ctrl);
-
-static ssize_t res_idx_show(struct device *dev,
-                           struct device_attribute *attr,
-                           char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Clear configuration from previous run */
+       memset(config, 0, sizeof(struct etmv4_config));
 
-       val = drvdata->res_idx;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
+       if (attr->exclude_kernel)
+               config->mode = ETM_MODE_EXCL_KERN;
 
-static ssize_t res_idx_store(struct device *dev,
-                            struct device_attribute *attr,
-                            const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       if (attr->exclude_user)
+               config->mode = ETM_MODE_EXCL_USER;
 
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       /* Resource selector pair 0 is always implemented and reserved */
-       if (val < 2 || val >= drvdata->nr_resource * 2)
-               return -EINVAL;
+       /* Always start from the default config */
+       etm4_set_default(config);
 
        /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
+        * By default the tracers are configured to trace the whole address
+        * range.  Narrow the field only if requested by user space.
         */
-       spin_lock(&drvdata->spinlock);
-       drvdata->res_idx = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(res_idx);
+       if (config->mode)
+               etm4_config_trace_mode(config);
 
-static ssize_t res_ctrl_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Go from generic option to ETMv4 specifics */
+       if (attr->config & BIT(ETM_OPT_CYCACC))
+               config->cfg |= ETMv4_MODE_CYCACC;
+       if (attr->config & BIT(ETM_OPT_TS))
+               config->cfg |= ETMv4_MODE_TIMESTAMP;
 
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->res_idx;
-       val = drvdata->res_ctrl[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+       return 0;
 }
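
From the perf side, the option bits tested above arrive in the event's config word. A user-space sketch follows; the bit positions are assumptions mirroring ETM_OPT_CYCACC and ETM_OPT_TS, whose authoritative values live in include/linux/coresight-pmu.h.

#include <stdint.h>

/* Assumed option bit positions (see include/linux/coresight-pmu.h). */
#define SK_ETM_OPT_CYCACC       12
#define SK_ETM_OPT_TS           28

/* Hedged sketch: perf_event_attr.config requesting cycle-accurate,
 * timestamped trace, as parsed by etm4_parse_event_config(). */
static uint64_t sk_perf_config(void)
{
        return (1ULL << SK_ETM_OPT_CYCACC) | (1ULL << SK_ETM_OPT_TS);
}
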
 
-static ssize_t res_ctrl_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
+static int etm4_enable_perf(struct coresight_device *csdev,
+                           struct perf_event_attr *attr)
 {
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       if (kstrtoul(buf, 16, &val))
+       if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
                return -EINVAL;
 
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->res_idx;
-       /* For odd idx, the pair inversion bit is RES0 */
-       if (idx % 2 != 0)
-               /* PAIRINV, bit[21] */
-               val &= ~BIT(21);
-       drvdata->res_ctrl[idx] = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(res_ctrl);
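
The PAIRINV handling above amounts to a single invariant; isolated as a sketch (helper name ours):

/* Hedged sketch: PAIRINV, bit[21], is RES0 for odd resource selectors. */
static unsigned long sk_res_ctrl_sanitize(unsigned int idx, unsigned long val)
{
        if (idx % 2 != 0)
                val &= ~(1UL << 21);
        return val;
}
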
-
-static ssize_t ctxid_idx_show(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->ctxid_idx;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t ctxid_idx_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val >= drvdata->numcidc)
-               return -EINVAL;
+       /* Configure the tracer based on the session's specifics */
+       etm4_parse_event_config(drvdata, attr);
+       /* And enable it */
+       etm4_enable_hw(drvdata);
 
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->ctxid_idx = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
+       return 0;
 }
-static DEVICE_ATTR_RW(ctxid_idx);
 
-static ssize_t ctxid_pid_show(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
+static int etm4_enable_sysfs(struct coresight_device *csdev)
 {
-       u8 idx;
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       int ret;
 
        spin_lock(&drvdata->spinlock);
-       idx = drvdata->ctxid_idx;
-       val = (unsigned long)drvdata->ctxid_vpid[idx];
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t ctxid_pid_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long vpid, pid;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
        /*
-        * only implemented when ctxid tracing is enabled, i.e. at least one
-        * ctxid comparator is implemented and ctxid is greater than 0 bits
-        * in length
+        * Executing etm4_enable_hw on the cpu whose ETM is being enabled
+        * ensures that register writes occur while the cpu is powered.
         */
-       if (!drvdata->ctxid_size || !drvdata->numcidc)
-               return -EINVAL;
-       if (kstrtoul(buf, 16, &vpid))
-               return -EINVAL;
-
-       pid = coresight_vpid_to_pid(vpid);
+       ret = smp_call_function_single(drvdata->cpu,
+                                      etm4_enable_hw, drvdata, 1);
+       if (ret)
+               goto err;
 
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->ctxid_idx;
-       drvdata->ctxid_pid[idx] = (u64)pid;
-       drvdata->ctxid_vpid[idx] = (u64)vpid;
+       drvdata->sticky_enable = true;
        spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(ctxid_pid);
 
-static ssize_t ctxid_masks_show(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       unsigned long val1, val2;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       dev_info(drvdata->dev, "ETM tracing enabled\n");
+       return 0;
 
-       spin_lock(&drvdata->spinlock);
-       val1 = drvdata->ctxid_mask0;
-       val2 = drvdata->ctxid_mask1;
+err:
        spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+       return ret;
 }
 
-static ssize_t ctxid_masks_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
+static int etm4_enable(struct coresight_device *csdev,
+                      struct perf_event_attr *attr, u32 mode)
 {
-       u8 i, j, maskbyte;
-       unsigned long val1, val2, mask;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       int ret;
+       u32 val;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       /*
-        * only implemented when ctxid tracing is enabled, i.e. at least one
-        * ctxid comparator is implemented and ctxid is greater than 0 bits
-        * in length
-        */
-       if (!drvdata->ctxid_size || !drvdata->numcidc)
-               return -EINVAL;
-       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
-               return -EINVAL;
+       val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
 
-       spin_lock(&drvdata->spinlock);
-       /*
-        * each byte[0..3] controls mask value applied to ctxid
-        * comparator[0..3]
-        */
-       switch (drvdata->numcidc) {
-       case 0x1:
-               /* COMP0, bits[7:0] */
-               drvdata->ctxid_mask0 = val1 & 0xFF;
-               break;
-       case 0x2:
-               /* COMP1, bits[15:8] */
-               drvdata->ctxid_mask0 = val1 & 0xFFFF;
-               break;
-       case 0x3:
-               /* COMP2, bits[23:16] */
-               drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
-               break;
-       case 0x4:
-               /* COMP3, bits[31:24] */
-               drvdata->ctxid_mask0 = val1;
-               break;
-       case 0x5:
-               /* COMP4, bits[7:0] */
-               drvdata->ctxid_mask0 = val1;
-               drvdata->ctxid_mask1 = val2 & 0xFF;
-               break;
-       case 0x6:
-               /* COMP5, bits[15:8] */
-               drvdata->ctxid_mask0 = val1;
-               drvdata->ctxid_mask1 = val2 & 0xFFFF;
-               break;
-       case 0x7:
-               /* COMP6, bits[23:16] */
-               drvdata->ctxid_mask0 = val1;
-               drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
+       /* Someone is already using the tracer */
+       if (val)
+               return -EBUSY;
+
+       switch (mode) {
+       case CS_MODE_SYSFS:
+               ret = etm4_enable_sysfs(csdev);
                break;
-       case 0x8:
-               /* COMP7, bits[31:24] */
-               drvdata->ctxid_mask0 = val1;
-               drvdata->ctxid_mask1 = val2;
+       case CS_MODE_PERF:
+               ret = etm4_enable_perf(csdev, attr);
                break;
        default:
-               break;
-       }
-       /*
-        * If software sets a mask bit to 1, it must program the relevant
-        * byte of the ctxid comparator value to 0x0, otherwise behavior is
-        * unpredictable.  For example, if bit[3] of ctxid_mask0 is 1, we
-        * must clear bits[31:24] (byte 3) of the ctxid comparator0 value
-        * register.
-        */
-       mask = drvdata->ctxid_mask0;
-       for (i = 0; i < drvdata->numcidc; i++) {
-               /* mask value of corresponding ctxid comparator */
-               maskbyte = mask & ETMv4_EVENT_MASK;
-               /*
-                * each bit corresponds to a byte of respective ctxid comparator
-                * value register
-                */
-               for (j = 0; j < 8; j++) {
-                       if (maskbyte & 1)
-                               drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
-                       maskbyte >>= 1;
-               }
-               /* Select the next ctxid comparator mask value */
-               if (i == 3)
-                       /* ctxid comparators[4-7] */
-                       mask = drvdata->ctxid_mask1;
-               else
-                       mask >>= 0x8;
+               ret = -EINVAL;
        }
 
-       spin_unlock(&drvdata->spinlock);
-       return size;
+       /* The tracer didn't start */
+       if (ret)
+               local_set(&drvdata->mode, CS_MODE_DISABLED);
+
+       return ret;
 }
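
For reference, the ownership handshake used above reduces to a small
claim/release pattern around the tracer's local_t mode (a sketch with
illustrative names, not part of the patch):

	#include <asm/local.h>

	/* Returns 0 when the caller wins ownership, -EBUSY otherwise. */
	static int claim_tracer(local_t *mode, u32 new_mode)
	{
		/* local_cmpxchg() hands back the previous value of *mode */
		if (local_cmpxchg(mode, CS_MODE_DISABLED, new_mode))
			return -EBUSY;
		return 0;
	}

	static void release_tracer(local_t *mode)
	{
		local_set(mode, CS_MODE_DISABLED);
	}
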
-static DEVICE_ATTR_RW(ctxid_masks);
 
-static ssize_t vmid_idx_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
+static void etm4_disable_hw(void *info)
 {
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       u32 control;
+       struct etmv4_drvdata *drvdata = info;
 
-       val = drvdata->vmid_idx;
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
+       CS_UNLOCK(drvdata->base);
 
-static ssize_t vmid_idx_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       control = readl_relaxed(drvdata->base + TRCPRGCTLR);
 
-       if (kstrtoul(buf, 16, &val))
-               return -EINVAL;
-       if (val >= drvdata->numvmidc)
-               return -EINVAL;
+       /* EN, bit[0] Trace unit enable bit */
+       control &= ~0x1;
 
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->vmid_idx = val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(vmid_idx);
+       /* make sure everything completes before disabling */
+       mb();
+       isb();
+       writel_relaxed(control, drvdata->base + TRCPRGCTLR);
 
-static ssize_t vmid_val_show(struct device *dev,
-                            struct device_attribute *attr,
-                            char *buf)
-{
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       CS_LOCK(drvdata->base);
 
-       val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
-       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+       dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
 }
 
-static ssize_t vmid_val_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
+static int etm4_disable_perf(struct coresight_device *csdev)
 {
-       unsigned long val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       /*
-        * only implemented when vmid tracing is enabled, i.e. at least one
-        * vmid comparator is implemented and at least 8 bit vmid size
-        */
-       if (!drvdata->vmid_size || !drvdata->numvmidc)
-               return -EINVAL;
-       if (kstrtoul(buf, 16, &val))
+       if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
                return -EINVAL;
 
-       spin_lock(&drvdata->spinlock);
-       drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
-       spin_unlock(&drvdata->spinlock);
-       return size;
+       etm4_disable_hw(drvdata);
+       return 0;
 }
-static DEVICE_ATTR_RW(vmid_val);
 
-static ssize_t vmid_masks_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static void etm4_disable_sysfs(struct coresight_device *csdev)
 {
-       unsigned long val1, val2;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val1 = drvdata->vmid_mask0;
-       val2 = drvdata->vmid_mask1;
-       spin_unlock(&drvdata->spinlock);
-       return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
-}
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-static ssize_t vmid_masks_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       u8 i, j, maskbyte;
-       unsigned long val1, val2, mask;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
        /*
-        * only implemented when vmid tracing is enabled, i.e. at least one
-        * vmid comparator is implemented and at least 8 bit vmid size
+        * Taking the hotplug lock here protects against clocks being
+        * disabled while tracing is left on (a crash scenario) if a user
+        * disable occurs after the cpu online mask indicates the cpu is
+        * offline but before the DYING hotplug callback is serviced by
+        * the ETM driver.
         */
-       if (!drvdata->vmid_size || !drvdata->numvmidc)
-               return -EINVAL;
-       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
-               return -EINVAL;
-
+       get_online_cpus();
        spin_lock(&drvdata->spinlock);
 
        /*
-        * each byte[0..3] controls mask value applied to vmid
-        * comparator[0..3]
+        * Executing etm4_disable_hw on the cpu whose ETM is being disabled
+        * ensures that register writes occur while the cpu is powered.
         */
-       switch (drvdata->numvmidc) {
-       case 0x1:
-               /* COMP0, bits[7:0] */
-               drvdata->vmid_mask0 = val1 & 0xFF;
-               break;
-       case 0x2:
-               /* COMP1, bits[15:8] */
-               drvdata->vmid_mask0 = val1 & 0xFFFF;
-               break;
-       case 0x3:
-               /* COMP2, bits[23:16] */
-               drvdata->vmid_mask0 = val1 & 0xFFFFFF;
-               break;
-       case 0x4:
-               /* COMP3, bits[31:24] */
-               drvdata->vmid_mask0 = val1;
-               break;
-       case 0x5:
-               /* COMP4, bits[7:0] */
-               drvdata->vmid_mask0 = val1;
-               drvdata->vmid_mask1 = val2 & 0xFF;
-               break;
-       case 0x6:
-               /* COMP5, bits[15:8] */
-               drvdata->vmid_mask0 = val1;
-               drvdata->vmid_mask1 = val2 & 0xFFFF;
-               break;
-       case 0x7:
-               /* COMP6, bits[23:16] */
-               drvdata->vmid_mask0 = val1;
-               drvdata->vmid_mask1 = val2 & 0xFFFFFF;
-               break;
-       case 0x8:
-               /* COMP7, bits[31:24] */
-               drvdata->vmid_mask0 = val1;
-               drvdata->vmid_mask1 = val2;
-               break;
-       default:
-               break;
-       }
+       smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
 
-       /*
-        * If software sets a mask bit to 1, it must program relevant byte
-        * of vmid comparator value 0x0, otherwise behavior is unpredictable.
-        * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
-        * of vmid comparator0 value (corresponding to byte 0) register.
-        */
-       mask = drvdata->vmid_mask0;
-       for (i = 0; i < drvdata->numvmidc; i++) {
-               /* mask value of corresponding vmid comparator */
-               maskbyte = mask & ETMv4_EVENT_MASK;
-               /*
-                * each bit corresponds to a byte of respective vmid comparator
-                * value register
-                */
-               for (j = 0; j < 8; j++) {
-                       if (maskbyte & 1)
-                               drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
-                       maskbyte >>= 1;
-               }
-               /* Select the next vmid comparator mask value */
-               if (i == 3)
-                       /* vmid comparators[4-7] */
-                       mask = drvdata->vmid_mask1;
-               else
-                       mask >>= 0x8;
-       }
        spin_unlock(&drvdata->spinlock);
-       return size;
-}
-static DEVICE_ATTR_RW(vmid_masks);
-
-static ssize_t cpu_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
-{
-       int val;
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->cpu;
-       return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+       put_online_cpus();
 
+       dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
-static DEVICE_ATTR_RO(cpu);
-
-static struct attribute *coresight_etmv4_attrs[] = {
-       &dev_attr_nr_pe_cmp.attr,
-       &dev_attr_nr_addr_cmp.attr,
-       &dev_attr_nr_cntr.attr,
-       &dev_attr_nr_ext_inp.attr,
-       &dev_attr_numcidc.attr,
-       &dev_attr_numvmidc.attr,
-       &dev_attr_nrseqstate.attr,
-       &dev_attr_nr_resource.attr,
-       &dev_attr_nr_ss_cmp.attr,
-       &dev_attr_reset.attr,
-       &dev_attr_mode.attr,
-       &dev_attr_pe.attr,
-       &dev_attr_event.attr,
-       &dev_attr_event_instren.attr,
-       &dev_attr_event_ts.attr,
-       &dev_attr_syncfreq.attr,
-       &dev_attr_cyc_threshold.attr,
-       &dev_attr_bb_ctrl.attr,
-       &dev_attr_event_vinst.attr,
-       &dev_attr_s_exlevel_vinst.attr,
-       &dev_attr_ns_exlevel_vinst.attr,
-       &dev_attr_addr_idx.attr,
-       &dev_attr_addr_instdatatype.attr,
-       &dev_attr_addr_single.attr,
-       &dev_attr_addr_range.attr,
-       &dev_attr_addr_start.attr,
-       &dev_attr_addr_stop.attr,
-       &dev_attr_addr_ctxtype.attr,
-       &dev_attr_addr_context.attr,
-       &dev_attr_seq_idx.attr,
-       &dev_attr_seq_state.attr,
-       &dev_attr_seq_event.attr,
-       &dev_attr_seq_reset_event.attr,
-       &dev_attr_cntr_idx.attr,
-       &dev_attr_cntrldvr.attr,
-       &dev_attr_cntr_val.attr,
-       &dev_attr_cntr_ctrl.attr,
-       &dev_attr_res_idx.attr,
-       &dev_attr_res_ctrl.attr,
-       &dev_attr_ctxid_idx.attr,
-       &dev_attr_ctxid_pid.attr,
-       &dev_attr_ctxid_masks.attr,
-       &dev_attr_vmid_idx.attr,
-       &dev_attr_vmid_val.attr,
-       &dev_attr_vmid_masks.attr,
-       &dev_attr_cpu.attr,
-       NULL,
-};
 
-#define coresight_simple_func(name, offset)                            \
-static ssize_t name##_show(struct device *_dev,                                \
-                          struct device_attribute *attr, char *buf)    \
-{                                                                      \
-       struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);  \
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
-                        readl_relaxed(drvdata->base + offset));        \
-}                                                                      \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(trcoslsr, TRCOSLSR);
-coresight_simple_func(trcpdcr, TRCPDCR);
-coresight_simple_func(trcpdsr, TRCPDSR);
-coresight_simple_func(trclsr, TRCLSR);
-coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
-coresight_simple_func(trcdevid, TRCDEVID);
-coresight_simple_func(trcdevtype, TRCDEVTYPE);
-coresight_simple_func(trcpidr0, TRCPIDR0);
-coresight_simple_func(trcpidr1, TRCPIDR1);
-coresight_simple_func(trcpidr2, TRCPIDR2);
-coresight_simple_func(trcpidr3, TRCPIDR3);
-
-static struct attribute *coresight_etmv4_mgmt_attrs[] = {
-       &dev_attr_trcoslsr.attr,
-       &dev_attr_trcpdcr.attr,
-       &dev_attr_trcpdsr.attr,
-       &dev_attr_trclsr.attr,
-       &dev_attr_trcauthstatus.attr,
-       &dev_attr_trcdevid.attr,
-       &dev_attr_trcdevtype.attr,
-       &dev_attr_trcpidr0.attr,
-       &dev_attr_trcpidr1.attr,
-       &dev_attr_trcpidr2.attr,
-       &dev_attr_trcpidr3.attr,
-       NULL,
-};
+static void etm4_disable(struct coresight_device *csdev)
+{
+       u32 mode;
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-coresight_simple_func(trcidr0, TRCIDR0);
-coresight_simple_func(trcidr1, TRCIDR1);
-coresight_simple_func(trcidr2, TRCIDR2);
-coresight_simple_func(trcidr3, TRCIDR3);
-coresight_simple_func(trcidr4, TRCIDR4);
-coresight_simple_func(trcidr5, TRCIDR5);
-/* trcidr[6,7] are reserved */
-coresight_simple_func(trcidr8, TRCIDR8);
-coresight_simple_func(trcidr9, TRCIDR9);
-coresight_simple_func(trcidr10, TRCIDR10);
-coresight_simple_func(trcidr11, TRCIDR11);
-coresight_simple_func(trcidr12, TRCIDR12);
-coresight_simple_func(trcidr13, TRCIDR13);
-
-static struct attribute *coresight_etmv4_trcidr_attrs[] = {
-       &dev_attr_trcidr0.attr,
-       &dev_attr_trcidr1.attr,
-       &dev_attr_trcidr2.attr,
-       &dev_attr_trcidr3.attr,
-       &dev_attr_trcidr4.attr,
-       &dev_attr_trcidr5.attr,
-       /* trcidr[6,7] are reserved */
-       &dev_attr_trcidr8.attr,
-       &dev_attr_trcidr9.attr,
-       &dev_attr_trcidr10.attr,
-       &dev_attr_trcidr11.attr,
-       &dev_attr_trcidr12.attr,
-       &dev_attr_trcidr13.attr,
-       NULL,
-};
+       /*
+        * For as long as the tracer isn't disabled, another entity can't
+        * change its status.  As such we can read the status here without
+        * fearing it will change under us.
+        */
+       mode = local_read(&drvdata->mode);
 
-static const struct attribute_group coresight_etmv4_group = {
-       .attrs = coresight_etmv4_attrs,
-};
+       switch (mode) {
+       case CS_MODE_DISABLED:
+               break;
+       case CS_MODE_SYSFS:
+               etm4_disable_sysfs(csdev);
+               break;
+       case CS_MODE_PERF:
+               etm4_disable_perf(csdev);
+               break;
+       }
 
-static const struct attribute_group coresight_etmv4_mgmt_group = {
-       .attrs = coresight_etmv4_mgmt_attrs,
-       .name = "mgmt",
-};
+       if (mode)
+               local_set(&drvdata->mode, CS_MODE_DISABLED);
+}
 
-static const struct attribute_group coresight_etmv4_trcidr_group = {
-       .attrs = coresight_etmv4_trcidr_attrs,
-       .name = "trcidr",
+static const struct coresight_ops_source etm4_source_ops = {
+       .cpu_id         = etm4_cpu_id,
+       .trace_id       = etm4_trace_id,
+       .enable         = etm4_enable,
+       .disable        = etm4_disable,
 };
 
-static const struct attribute_group *coresight_etmv4_groups[] = {
-       &coresight_etmv4_group,
-       &coresight_etmv4_mgmt_group,
-       &coresight_etmv4_trcidr_group,
-       NULL,
+static const struct coresight_ops etm4_cs_ops = {
+       .source_ops     = &etm4_source_ops,
 };
 
 static void etm4_init_arch_data(void *info)
@@ -2310,6 +408,9 @@ static void etm4_init_arch_data(void *info)
        u32 etmidr5;
        struct etmv4_drvdata *drvdata = info;
 
+       /* Make sure all registers are accessible */
+       etm4_os_unlock(drvdata);
+
        CS_UNLOCK(drvdata->base);
 
        /* find all capabilities of the tracing unit */
@@ -2461,93 +562,115 @@ static void etm4_init_arch_data(void *info)
        CS_LOCK(drvdata->base);
 }
 
-static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
+static void etm4_set_default(struct etmv4_config *config)
 {
-       int i;
+       if (WARN_ON_ONCE(!config))
+               return;
 
-       drvdata->pe_sel = 0x0;
-       drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
-                       ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);
+       /*
+        * Make default initialisation trace everything
+        *
+        * Select the "always true" resource selector on the
+        * "Enablign Event" line and configure address range comparator
+        * '0' to trace all the possible address range.  From there
+        * configure the "include/exclude" engine to include address
+        * range comparator '0'.
+        */
 
        /* disable all events tracing */
-       drvdata->eventctrl0 = 0x0;
-       drvdata->eventctrl1 = 0x0;
+       config->eventctrl0 = 0x0;
+       config->eventctrl1 = 0x0;
 
        /* disable stalling */
-       drvdata->stall_ctrl = 0x0;
+       config->stall_ctrl = 0x0;
+
+       /* enable trace synchronization every 4096 bytes, if available */
+       config->syncfreq = 0xC;
 
        /* disable timestamp event */
-       drvdata->ts_ctrl = 0x0;
+       config->ts_ctrl = 0x0;
 
-       /* enable trace synchronization every 4096 bytes for trace */
-       if (drvdata->syncpr == false)
-               drvdata->syncfreq = 0xC;
+       /* TRCVICTLR::EVENT = 0x01, select the always on logic */
+       config->vinst_ctrl |= BIT(0);
 
        /*
-        *  enable viewInst to trace everything with start-stop logic in
-        *  started state
+        * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
+        * in the started state
         */
-       drvdata->vinst_ctrl |= BIT(0);
-       /* set initial state of start-stop logic */
-       if (drvdata->nr_addr_cmp)
-               drvdata->vinst_ctrl |= BIT(9);
+       config->vinst_ctrl |= BIT(9);
 
-       /* no address range filtering for ViewInst */
-       drvdata->viiectlr = 0x0;
-       /* no start-stop filtering for ViewInst */
-       drvdata->vissctlr = 0x0;
+       /*
+        * Configure address range comparator '0' to encompass all
+        * possible addresses.
+        */
 
-       /* disable seq events */
-       for (i = 0; i < drvdata->nrseqstate-1; i++)
-               drvdata->seq_ctrl[i] = 0x0;
-       drvdata->seq_rst = 0x0;
-       drvdata->seq_state = 0x0;
+       /* First half of default address comparator: start at address 0 */
+       config->addr_val[ETM_DEFAULT_ADDR_COMP] = 0x0;
+       /* trace instruction addresses */
+       config->addr_acc[ETM_DEFAULT_ADDR_COMP] &= ~(BIT(0) | BIT(1));
+       /* EXLEVEL_NS, bits[15:12], only trace application and kernel space */
+       config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= ETM_EXLEVEL_NS_HYP;
+       /* EXLEVEL_S, bits[11:8], don't trace anything in secure state */
+       config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= (ETM_EXLEVEL_S_APP |
+                                                   ETM_EXLEVEL_S_OS |
+                                                   ETM_EXLEVEL_S_HYP);
+       config->addr_type[ETM_DEFAULT_ADDR_COMP] = ETM_ADDR_TYPE_RANGE;
 
-       /* disable external input events */
-       drvdata->ext_inp = 0x0;
+       /*
+        * Second half of default address comparator: go all
+        * the way to the top.
+        */
+       config->addr_val[ETM_DEFAULT_ADDR_COMP + 1] = ~0x0;
+       /* trace instruction addresses */
+       config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] &= ~(BIT(0) | BIT(1));
+       /* Address comparator type must be equal for both halves */
+       config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] =
+                                       config->addr_acc[ETM_DEFAULT_ADDR_COMP];
+       config->addr_type[ETM_DEFAULT_ADDR_COMP + 1] = ETM_ADDR_TYPE_RANGE;
 
-       for (i = 0; i < drvdata->nr_cntr; i++) {
-               drvdata->cntrldvr[i] = 0x0;
-               drvdata->cntr_ctrl[i] = 0x0;
-               drvdata->cntr_val[i] = 0x0;
-       }
+       /*
+        * Configure the ViewInst function to filter on address range
+        * comparator '0'.
+        */
+       config->viiectlr = BIT(0);
 
-       /* Resource selector pair 0 is always implemented and reserved */
-       drvdata->res_idx = 0x2;
-       for (i = 2; i < drvdata->nr_resource * 2; i++)
-               drvdata->res_ctrl[i] = 0x0;
+       /* no start-stop filtering for ViewInst */
+       config->vissctlr = 0x0;
+}
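
As a quick illustration (not part of the patch), the comparator pair
programmed above starts at 0x0 and ends at ~0x0, so every instruction
address lands inside the default ViewInst include range:

	/* Sketch: does 'addr' pass the default range filter? Always true. */
	static bool example_in_default_range(const struct etmv4_config *config,
					     u64 addr)
	{
		return addr >= config->addr_val[ETM_DEFAULT_ADDR_COMP] &&
		       addr <= config->addr_val[ETM_DEFAULT_ADDR_COMP + 1];
	}
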
 
-       for (i = 0; i < drvdata->nr_ss_cmp; i++) {
-               drvdata->ss_ctrl[i] = 0x0;
-               drvdata->ss_pe_cmp[i] = 0x0;
-       }
+void etm4_config_trace_mode(struct etmv4_config *config)
+{
+       u32 addr_acc, mode;
 
-       if (drvdata->nr_addr_cmp >= 1) {
-               drvdata->addr_val[0] = (unsigned long)_stext;
-               drvdata->addr_val[1] = (unsigned long)_etext;
-               drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
-               drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
-       }
+       mode = config->mode;
+       mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
 
-       for (i = 0; i < drvdata->numcidc; i++) {
-               drvdata->ctxid_pid[i] = 0x0;
-               drvdata->ctxid_vpid[i] = 0x0;
-       }
+       /* excluding kernel AND user space doesn't make sense */
+       WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));
 
-       drvdata->ctxid_mask0 = 0x0;
-       drvdata->ctxid_mask1 = 0x0;
+       /* nothing to do if neither flag is set */
+       if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+               return;
 
-       for (i = 0; i < drvdata->numvmidc; i++)
-               drvdata->vmid_val[i] = 0x0;
-       drvdata->vmid_mask0 = 0x0;
-       drvdata->vmid_mask1 = 0x0;
+       addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
+       /* clear default config */
+       addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS);
 
        /*
-        * A trace ID value of 0 is invalid, so let's start at some
-        * random value that fits in 7 bits.  ETMv3.x has 0x10 so let's
-        * start at 0x20.
+        * EXLEVEL_NS, bits[15:12]
+        * The Exception levels are:
+        *   Bit[12] Exception level 0 - Application
+        *   Bit[13] Exception level 1 - OS
+        *   Bit[14] Exception level 2 - Hypervisor
+        *   Bit[15] Never implemented
         */
-       drvdata->trcid = 0x20 + drvdata->cpu;
+       if (mode & ETM_MODE_EXCL_KERN)
+               addr_acc |= ETM_EXLEVEL_NS_OS;
+       else
+               addr_acc |= ETM_EXLEVEL_NS_APP;
+
+       config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
+       config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
 }
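
Illustrative use of the two helpers together (a sketch that assumes it
sits next to the static helpers in coresight-etm4x.c; the function name
is made up):

	/* Trace user space only: default catch-all, then drop the kernel */
	static void example_trace_user_only(struct etmv4_config *config)
	{
		etm4_set_default(config);
		config->mode |= ETM_MODE_EXCL_KERN;
		etm4_config_trace_mode(config);
	}
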
 
 static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
@@ -2566,7 +689,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
                        etmdrvdata[cpu]->os_unlock = true;
                }
 
-               if (etmdrvdata[cpu]->enable)
+               if (local_read(&etmdrvdata[cpu]->mode))
                        etm4_enable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;
@@ -2579,7 +702,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
 
        case CPU_DYING:
                spin_lock(&etmdrvdata[cpu]->spinlock);
-               if (etmdrvdata[cpu]->enable)
+               if (local_read(&etmdrvdata[cpu]->mode))
                        etm4_disable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;
@@ -2592,6 +715,11 @@ static struct notifier_block etm4_cpu_notifier = {
        .notifier_call = etm4_cpu_callback,
 };
 
+static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
+{
+       drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
+}
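
The per-cpu trace ID now comes from the shared helper instead of the
0x20 + cpu scheme removed above. At this point in the tree the helper
lived in include/linux/coresight-pmu.h and, as far as can be told from
the surrounding code, computed something along these lines (seed value
and spacing are assumptions, check the header):

	#define CORESIGHT_ETM_PMU_SEED	0x10	/* assumed seed value */

	static inline int coresight_get_trace_id(int cpu)
	{
		/* Trace ID 0 is invalid, so start from a non-zero seed */
		return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
	}
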
+
 static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 {
        int ret;
@@ -2635,9 +763,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
        get_online_cpus();
        etmdrvdata[drvdata->cpu] = drvdata;
 
-       if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
-               drvdata->os_unlock = true;
-
        if (smp_call_function_single(drvdata->cpu,
                                etm4_init_arch_data,  drvdata, 1))
                dev_err(dev, "ETM arch init failed\n");
@@ -2651,9 +776,9 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
                ret = -EINVAL;
                goto err_arch_supported;
        }
-       etm4_init_default_data(drvdata);
 
-       pm_runtime_put(&adev->dev);
+       etm4_init_trace_id(drvdata);
+       etm4_set_default(&drvdata->config);
 
        desc->type = CORESIGHT_DEV_TYPE_SOURCE;
        desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -2664,9 +789,16 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
        drvdata->csdev = coresight_register(desc);
        if (IS_ERR(drvdata->csdev)) {
                ret = PTR_ERR(drvdata->csdev);
-               goto err_coresight_register;
+               goto err_arch_supported;
+       }
+
+       ret = etm_perf_symlink(drvdata->csdev, true);
+       if (ret) {
+               coresight_unregister(drvdata->csdev);
+               goto err_arch_supported;
        }
 
+       pm_runtime_put(&adev->dev);
        dev_info(dev, "%s initialized\n", (char *)id->data);
 
        if (boot_enable) {
@@ -2677,24 +809,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
        return 0;
 
 err_arch_supported:
-       pm_runtime_put(&adev->dev);
-err_coresight_register:
        if (--etm4_count == 0)
                unregister_hotcpu_notifier(&etm4_cpu_notifier);
        return ret;
 }
 
-static int etm4_remove(struct amba_device *adev)
-{
-       struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
-
-       coresight_unregister(drvdata->csdev);
-       if (--etm4_count == 0)
-               unregister_hotcpu_notifier(&etm4_cpu_notifier);
-
-       return 0;
-}
-
 static struct amba_id etm4_ids[] = {
        {       /* ETM 4.0 - Qualcomm */
                .id     = 0x0003b95d,
@@ -2706,16 +825,20 @@ static struct amba_id etm4_ids[] = {
                .mask   = 0x000fffff,
                .data   = "ETM 4.0",
        },
+       {       /* ETM 4.0 - A72, Maia, HiSilicon */
+               .id = 0x000bb95a,
+               .mask = 0x000fffff,
+               .data = "ETM 4.0",
+       },
        { 0, 0},
 };
 
 static struct amba_driver etm4x_driver = {
        .drv = {
                .name   = "coresight-etm4x",
+               .suppress_bind_attrs = true,
        },
        .probe          = etm4_probe,
-       .remove         = etm4_remove,
        .id_table       = etm4_ids,
 };
-
-module_amba_driver(etm4x_driver);
+builtin_amba_driver(etm4x_driver);
index c34100205ca948681c62cd45411623f8c5cba4a6..5359c5197c1d6e4ccbbabe666785448bd47bdeef 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef _CORESIGHT_CORESIGHT_ETM_H
 #define _CORESIGHT_CORESIGHT_ETM_H
 
+#include <asm/local.h>
 #include <linux/spinlock.h>
 #include "coresight-priv.h"
 
 #define ETM_MODE_TRACE_RESET           BIT(25)
 #define ETM_MODE_TRACE_ERR             BIT(26)
 #define ETM_MODE_VIEWINST_STARTSTOP    BIT(27)
-#define ETMv4_MODE_ALL                 0xFFFFFFF
+#define ETMv4_MODE_ALL                 (GENMASK(27, 0) | \
+                                        ETM_MODE_EXCL_KERN | \
+                                        ETM_MODE_EXCL_USER)
 
 #define TRCSTATR_IDLE_BIT              0
+#define ETM_DEFAULT_ADDR_COMP          0
+
+/* secure state access levels */
+#define ETM_EXLEVEL_S_APP              BIT(8)
+#define ETM_EXLEVEL_S_OS               BIT(9)
+#define ETM_EXLEVEL_S_NA               BIT(10)
+#define ETM_EXLEVEL_S_HYP              BIT(11)
+/* non-secure state access levels */
+#define ETM_EXLEVEL_NS_APP             BIT(12)
+#define ETM_EXLEVEL_NS_OS              BIT(13)
+#define ETM_EXLEVEL_NS_HYP             BIT(14)
+#define ETM_EXLEVEL_NS_NA              BIT(15)
 
 /**
- * struct etm4_drvdata - specifics associated to an ETM component
- * @base:       Memory mapped base address for this component.
- * @dev:        The device entity associated to this component.
- * @csdev:      Component vitals needed by the framework.
- * @spinlock:   Only one at a time pls.
- * @cpu:        The cpu this component is affined to.
- * @arch:       ETM version number.
- * @enable:    Is this ETM currently tracing.
- * @sticky_enable: true if ETM base configuration has been done.
- * @boot_enable:True if we should start tracing at boot time.
- * @os_unlock:  True if access to management registers is allowed.
- * @nr_pe:     The number of processing entity available for tracing.
- * @nr_pe_cmp: The number of processing entity comparator inputs that are
- *             available for tracing.
- * @nr_addr_cmp:Number of pairs of address comparators available
- *             as found in ETMIDR4 0-3.
- * @nr_cntr:    Number of counters as found in ETMIDR5 bit 28-30.
- * @nr_ext_inp: Number of external input.
- * @numcidc:   Number of contextID comparators.
- * @numvmidc:  Number of VMID comparators.
- * @nrseqstate: The number of sequencer states that are implemented.
- * @nr_event:  Indicates how many events the trace unit support.
- * @nr_resource:The number of resource selection pairs available for tracing.
- * @nr_ss_cmp: Number of single-shot comparator controls that are available.
+ * struct etmv4_config - configuration information related to an ETMv4
  * @mode:      Controls various modes supported by this ETM.
- * @trcid:     value of the current ID for this component.
- * @trcid_size: Indicates the trace ID width.
- * @instrp0:   Tracing of load and store instructions
- *             as P0 elements is supported.
- * @trccond:   If the trace unit supports conditional
- *             instruction tracing.
- * @retstack:  Indicates if the implementation supports a return stack.
- * @trc_error: Whether a trace unit can trace a system
- *             error exception.
- * @atbtrig:   If the implementation can support ATB triggers
- * @lpoverride:        If the implementation can support low-power state over.
  * @pe_sel:    Controls which PE to trace.
  * @cfg:       Controls the tracing options.
  * @eventctrl0: Controls the tracing of arbitrary events.
  * @eventctrl1: Controls the behavior of the events that @event_ctrl0 selects.
  * @stallctl:  If functionality that prevents trace unit buffer overflows
  *             is available.
- * @sysstall:  Does the system support stall control of the PE?
- * @nooverflow:        Indicate if overflow prevention is supported.
- * @stall_ctrl:        Enables trace unit functionality that prevents trace
- *             unit buffer overflows.
- * @ts_size:   Global timestamp size field.
  * @ts_ctrl:   Controls the insertion of global timestamps in the
  *             trace streams.
- * @syncpr:    Indicates if an implementation has a fixed
- *             synchronization period.
  * @syncfreq:  Controls how often trace synchronization requests occur.
- * @trccci:    Indicates if the trace unit supports cycle counting
- *             for instruction.
- * @ccsize:    Indicates the size of the cycle counter in bits.
- * @ccitmin:   minimum value that can be programmed in
  *             the TRCCCCTLR register.
  * @ccctlr:    Sets the threshold value for cycle counting.
- * @trcbb:     Indicates if the trace unit supports branch broadcast tracing.
- * @q_support: Q element support characteristics.
  * @vinst_ctrl:        Controls instruction trace filtering.
  * @viiectlr:  Set or read, the address range comparators.
  * @vissctlr:  Set, or read, the single address comparators that control the
  * @addr_acc:  Address comparator access type.
  * @addr_type: Current status of the comparator register.
  * @ctxid_idx: Context ID index selector.
- * @ctxid_size:        Size of the context ID field to consider.
  * @ctxid_pid: Value of the context ID comparator.
  * @ctxid_vpid:        Virtual PID seen by users if PID namespace is enabled, otherwise
  *             the same value of ctxid_pid.
  * @ctxid_mask0:Context ID comparator mask for comparator 0-3.
  * @ctxid_mask1:Context ID comparator mask for comparator 4-7.
  * @vmid_idx:  VM ID index selector.
- * @vmid_size: Size of the VM ID comparator to consider.
  * @vmid_val:  Value of the VM ID comparator.
  * @vmid_mask0:        VM ID comparator mask for comparator 0-3.
  * @vmid_mask1:        VM ID comparator mask for comparator 4-7.
- * @s_ex_level:        In secure state, indicates whether instruction tracing is
- *             supported for the corresponding Exception level.
- * @ns_ex_level:In non-secure state, indicates whether instruction tracing is
- *             supported for the corresponding Exception level.
  * @ext_inp:   External input selection.
  */
-struct etmv4_drvdata {
-       void __iomem                    *base;
-       struct device                   *dev;
-       struct coresight_device         *csdev;
-       spinlock_t                      spinlock;
-       int                             cpu;
-       u8                              arch;
-       bool                            enable;
-       bool                            sticky_enable;
-       bool                            boot_enable;
-       bool                            os_unlock;
-       u8                              nr_pe;
-       u8                              nr_pe_cmp;
-       u8                              nr_addr_cmp;
-       u8                              nr_cntr;
-       u8                              nr_ext_inp;
-       u8                              numcidc;
-       u8                              numvmidc;
-       u8                              nrseqstate;
-       u8                              nr_event;
-       u8                              nr_resource;
-       u8                              nr_ss_cmp;
+struct etmv4_config {
        u32                             mode;
-       u8                              trcid;
-       u8                              trcid_size;
-       bool                            instrp0;
-       bool                            trccond;
-       bool                            retstack;
-       bool                            trc_error;
-       bool                            atbtrig;
-       bool                            lpoverride;
        u32                             pe_sel;
        u32                             cfg;
        u32                             eventctrl0;
        u32                             eventctrl1;
-       bool                            stallctl;
-       bool                            sysstall;
-       bool                            nooverflow;
        u32                             stall_ctrl;
-       u8                              ts_size;
        u32                             ts_ctrl;
-       bool                            syncpr;
        u32                             syncfreq;
-       bool                            trccci;
-       u8                              ccsize;
-       u8                              ccitmin;
        u32                             ccctlr;
-       bool                            trcbb;
        u32                             bb_ctrl;
-       bool                            q_support;
        u32                             vinst_ctrl;
        u32                             viiectlr;
        u32                             vissctlr;
@@ -353,19 +276,119 @@ struct etmv4_drvdata {
        u64                             addr_acc[ETM_MAX_SINGLE_ADDR_CMP];
        u8                              addr_type[ETM_MAX_SINGLE_ADDR_CMP];
        u8                              ctxid_idx;
-       u8                              ctxid_size;
        u64                             ctxid_pid[ETMv4_MAX_CTXID_CMP];
        u64                             ctxid_vpid[ETMv4_MAX_CTXID_CMP];
        u32                             ctxid_mask0;
        u32                             ctxid_mask1;
        u8                              vmid_idx;
-       u8                              vmid_size;
        u64                             vmid_val[ETM_MAX_VMID_CMP];
        u32                             vmid_mask0;
        u32                             vmid_mask1;
+       u32                             ext_inp;
+};
+
+/**
+ * struct etm4_drvdata - specifics associated to an ETM component
+ * @base:       Memory mapped base address for this component.
+ * @dev:        The device entity associated to this component.
+ * @csdev:      Component vitals needed by the framework.
+ * @spinlock:   Only one at a time pls.
+ * @mode:      This tracer's mode, i.e. sysFS, Perf or disabled.
+ * @cpu:        The cpu this component is affined to.
+ * @arch:       ETM version number.
+ * @nr_pe:     The number of processing entity available for tracing.
+ * @nr_pe_cmp: The number of processing entity comparator inputs that are
+ *             available for tracing.
+ * @nr_addr_cmp:Number of pairs of address comparators available
+ *             as found in ETMIDR4 0-3.
+ * @nr_cntr:    Number of counters as found in ETMIDR5 bit 28-30.
+ * @nr_ext_inp: Number of external input.
+ * @numcidc:   Number of contextID comparators.
+ * @numvmidc:  Number of VMID comparators.
+ * @nrseqstate: The number of sequencer states that are implemented.
+ * @nr_event:  Indicates how many events the trace unit support.
+ * @nr_resource:The number of resource selection pairs available for tracing.
+ * @nr_ss_cmp: Number of single-shot comparator controls that are available.
+ * @trcid:     value of the current ID for this component.
+ * @trcid_size: Indicates the trace ID width.
+ * @ts_size:   Global timestamp size field.
+ * @ctxid_size:        Size of the context ID field to consider.
+ * @vmid_size: Size of the VM ID comparator to consider.
+ * @ccsize:    Indicates the size of the cycle counter in bits.
+ * @ccitmin:   minimum value that can be programmed in
+ *             the TRCCCCTLR register.
+ * @s_ex_level:        In secure state, indicates whether instruction tracing is
+ *             supported for the corresponding Exception level.
+ * @ns_ex_level:In non-secure state, indicates whether instruction tracing is
+ *             supported for the corresponding Exception level.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable:True if we should start tracing at boot time.
+ * @os_unlock:  True if access to management registers is allowed.
+ * @instrp0:   Tracing of load and store instructions
+ *             as P0 elements is supported.
+ * @trcbb:     Indicates if the trace unit supports branch broadcast tracing.
+ * @trccond:   If the trace unit supports conditional
+ *             instruction tracing.
+ * @retstack:  Indicates if the implementation supports a return stack.
+ * @trccci:    Indicates if the trace unit supports cycle counting
+ *             for instruction.
+ * @q_support: Q element support characteristics.
+ * @trc_error: Whether a trace unit can trace a system
+ *             error exception.
+ * @syncpr:    Indicates if an implementation has a fixed
+ *             synchronization period.
+ * @stall_ctrl:        Enables trace unit functionality that prevents trace
+ *             unit buffer overflows.
+ * @sysstall:  Does the system support stall control of the PE?
+ * @nooverflow:        Indicate if overflow prevention is supported.
+ * @atbtrig:   If the implementation can support ATB triggers.
+ * @lpoverride:        If the implementation can support low-power state override.
+ * @config:    structure holding configuration parameters.
+ */
+struct etmv4_drvdata {
+       void __iomem                    *base;
+       struct device                   *dev;
+       struct coresight_device         *csdev;
+       spinlock_t                      spinlock;
+       local_t                         mode;
+       int                             cpu;
+       u8                              arch;
+       u8                              nr_pe;
+       u8                              nr_pe_cmp;
+       u8                              nr_addr_cmp;
+       u8                              nr_cntr;
+       u8                              nr_ext_inp;
+       u8                              numcidc;
+       u8                              numvmidc;
+       u8                              nrseqstate;
+       u8                              nr_event;
+       u8                              nr_resource;
+       u8                              nr_ss_cmp;
+       u8                              trcid;
+       u8                              trcid_size;
+       u8                              ts_size;
+       u8                              ctxid_size;
+       u8                              vmid_size;
+       u8                              ccsize;
+       u8                              ccitmin;
        u8                              s_ex_level;
        u8                              ns_ex_level;
-       u32                             ext_inp;
+       u8                              q_support;
+       bool                            sticky_enable;
+       bool                            boot_enable;
+       bool                            os_unlock;
+       bool                            instrp0;
+       bool                            trcbb;
+       bool                            trccond;
+       bool                            retstack;
+       bool                            trccci;
+       bool                            trc_error;
+       bool                            syncpr;
+       bool                            stallctl;
+       bool                            sysstall;
+       bool                            nooverflow;
+       bool                            atbtrig;
+       bool                            lpoverride;
+       struct etmv4_config             config;
 };
 
 /* Address comparator access types */
@@ -391,4 +414,7 @@ enum etm_addr_type {
        ETM_ADDR_TYPE_START,
        ETM_ADDR_TYPE_STOP,
 };
+
+extern const struct attribute_group *coresight_etmv4_groups[];
+void etm4_config_trace_mode(struct etmv4_config *config);
 #endif
index 2e36bde7fcb41bbfe3972502be5d1d11aa4c49a5..05df789056ccfc823a103d3bc070bef1eaf46d85 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Funnel driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -69,7 +70,6 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
 {
        struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
        funnel_enable_hw(drvdata, inport);
 
        dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
@@ -95,7 +95,6 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
        struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
        funnel_disable_hw(drvdata, inport);
-       pm_runtime_put(drvdata->dev);
 
        dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
 }
@@ -222,15 +221,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
        if (IS_ERR(drvdata->csdev))
                return PTR_ERR(drvdata->csdev);
 
-       dev_info(dev, "FUNNEL initialized\n");
-       return 0;
-}
-
-static int funnel_remove(struct amba_device *adev)
-{
-       struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
-
-       coresight_unregister(drvdata->csdev);
        return 0;
 }
 
@@ -273,13 +263,9 @@ static struct amba_driver funnel_driver = {
                .name   = "coresight-funnel",
                .owner  = THIS_MODULE,
                .pm     = &funnel_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
        .probe          = funnel_probe,
-       .remove         = funnel_remove,
        .id_table       = funnel_ids,
 };
-
-module_amba_driver(funnel_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Funnel driver");
+builtin_amba_driver(funnel_driver);
index 62fcd98cc7cfc76798316c3a37a547063ed57955..ad975c58080d28b0a43034ff2938825262951322 100644 (file)
 #define TIMEOUT_US             100
 #define BMVAL(val, lsb, msb)   ((val & GENMASK(msb, lsb)) >> lsb)
 
+#define ETM_MODE_EXCL_KERN     BIT(30)
+#define ETM_MODE_EXCL_USER     BIT(31)
+
+#define coresight_simple_func(type, name, offset)                      \
+static ssize_t name##_show(struct device *_dev,                                \
+                          struct device_attribute *attr, char *buf)    \
+{                                                                      \
+       type *drvdata = dev_get_drvdata(_dev->parent);                  \
+       return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
+                        readl_relaxed(drvdata->base + offset));        \
+}                                                                      \
+static DEVICE_ATTR_RO(name)
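
For illustration (not part of the patch): with the new 'type' parameter
any CoreSight driver can stamp out read-only register views for its own
drvdata. A hypothetical

	coresight_simple_func(struct stm_drvdata, tcsr, STMTCSR);

would expand to:

	static ssize_t tcsr_show(struct device *_dev,
				 struct device_attribute *attr, char *buf)
	{
		struct stm_drvdata *drvdata = dev_get_drvdata(_dev->parent);
		return scnprintf(buf, PAGE_SIZE, "0x%x\n",
				 readl_relaxed(drvdata->base + STMTCSR));
	}
	static DEVICE_ATTR_RO(tcsr);
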
+
+enum cs_mode {
+       CS_MODE_DISABLED,
+       CS_MODE_SYSFS,
+       CS_MODE_PERF,
+};
+
+/**
+ * struct cs_buffers - keep track of a recording session's specifics
+ * @cur:       index of the current buffer
+ * @nr_pages:  max number of pages granted to us
+ * @offset:    offset within the current buffer
+ * @data_size: how much we collected in this run
+ * @lost:      non-zero if the HW buffer wrapped around
+ * @snapshot:  is this run in snapshot mode
+ * @data_pages:        a handle to the ring buffer
+ */
+struct cs_buffers {
+       unsigned int            cur;
+       unsigned int            nr_pages;
+       unsigned long           offset;
+       local_t                 data_size;
+       local_t                 lost;
+       bool                    snapshot;
+       void                    **data_pages;
+};
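
A sketch of how a sink driver might account into cs_buffers during a
session (the function is illustrative, not part of the patch):

	static void example_account(struct cs_buffers *buf,
				    unsigned long bytes, bool wrapped)
	{
		local_add(bytes, &buf->data_size);	/* collected this run */
		if (wrapped)
			local_inc(&buf->lost);		/* HW buffer wrapped */
	}
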
+
 static inline void CS_LOCK(void __iomem *addr)
 {
        do {
@@ -52,6 +91,12 @@ static inline void CS_UNLOCK(void __iomem *addr)
        } while (0);
 }
 
+void coresight_disable_path(struct list_head *path);
+int coresight_enable_path(struct list_head *path, u32 mode);
+struct coresight_device *coresight_get_sink(struct list_head *path);
+struct list_head *coresight_build_path(struct coresight_device *csdev);
+void coresight_release_path(struct list_head *path);
+
 #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
 extern int etm_readl_cp14(u32 off, unsigned int *val);
 extern int etm_writel_cp14(u32 off, u32 val);
index 584059e9e8660f228f785cb87b9200e3b315d675..700f710e4bfa6cc4449d04b2842eaf377f3b9368 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/clk.h>
 #include <linux/coresight.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -48,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
 {
        struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
-
        CS_UNLOCK(drvdata->base);
 
        /*
@@ -86,8 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
 
        CS_LOCK(drvdata->base);
 
-       pm_runtime_put(drvdata->dev);
-
        dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
@@ -156,15 +151,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
        return 0;
 }
 
-static int replicator_remove(struct amba_device *adev)
-{
-       struct replicator_state *drvdata = amba_get_drvdata(adev);
-
-       pm_runtime_disable(&adev->dev);
-       coresight_unregister(drvdata->csdev);
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int replicator_runtime_suspend(struct device *dev)
 {
@@ -206,10 +192,9 @@ static struct amba_driver replicator_driver = {
        .drv = {
                .name   = "coresight-replicator-qcom",
                .pm     = &replicator_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
        .probe          = replicator_probe,
-       .remove         = replicator_remove,
        .id_table       = replicator_ids,
 };
-
-module_amba_driver(replicator_driver);
+builtin_amba_driver(replicator_driver);
index 963ac197c2535caf202960af34490e6abd02d4cb..c6982e312e156892eda2126ad061d23a73522939 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Replicator driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -41,7 +42,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
 {
        struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
        dev_info(drvdata->dev, "REPLICATOR enabled\n");
        return 0;
 }
@@ -51,7 +51,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
 {
        struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_put(drvdata->dev);
        dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
@@ -115,7 +114,6 @@ static int replicator_probe(struct platform_device *pdev)
 
        pm_runtime_put(&pdev->dev);
 
-       dev_info(dev, "REPLICATOR initialized\n");
        return 0;
 
 out_disable_pm:
@@ -127,20 +125,6 @@ out_disable_pm:
        return ret;
 }
 
-static int replicator_remove(struct platform_device *pdev)
-{
-       struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
-
-       coresight_unregister(drvdata->csdev);
-       pm_runtime_get_sync(&pdev->dev);
-       if (!IS_ERR(drvdata->atclk))
-               clk_disable_unprepare(drvdata->atclk);
-       pm_runtime_put_noidle(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int replicator_runtime_suspend(struct device *dev)
 {
@@ -175,15 +159,11 @@ static const struct of_device_id replicator_match[] = {
 
 static struct platform_driver replicator_driver = {
        .probe          = replicator_probe,
-       .remove         = replicator_remove,
        .driver         = {
                .name   = "coresight-replicator",
                .of_match_table = replicator_match,
                .pm     = &replicator_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
 };
-
 builtin_platform_driver(replicator_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
new file mode 100644 (file)
index 0000000..73be58a
--- /dev/null
@@ -0,0 +1,920 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight System Trace Macrocell driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Initial implementation by Pratik Patel
+ * (C) 2014-2015 Pratik Patel <pratikp@codeaurora.org>
+ *
+ * Serious refactoring, code cleanup and upgrading to the Coresight upstream
+ * framework by Mathieu Poirier
+ * (C) 2015-2016 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * Guaranteed timing and support for various packet types coming from the
+ * generic STM API by Chunyan Zhang
+ * (C) 2015-2016 Chunyan Zhang <zhang.chunyan@linaro.org>
+ */
+#include <asm/local.h>
+#include <linux/amba/bus.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/coresight-stm.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/perf_event.h>
+#include <linux/pm_runtime.h>
+#include <linux/stm.h>
+
+#include "coresight-priv.h"
+
+#define STMDMASTARTR                   0xc04
+#define STMDMASTOPR                    0xc08
+#define STMDMASTATR                    0xc0c
+#define STMDMACTLR                     0xc10
+#define STMDMAIDR                      0xcfc
+#define STMHEER                                0xd00
+#define STMHETER                       0xd20
+#define STMHEBSR                       0xd60
+#define STMHEMCR                       0xd64
+#define STMHEMASTR                     0xdf4
+#define STMHEFEAT1R                    0xdf8
+#define STMHEIDR                       0xdfc
+#define STMSPER                                0xe00
+#define STMSPTER                       0xe20
+#define STMPRIVMASKR                   0xe40
+#define STMSPSCR                       0xe60
+#define STMSPMSCR                      0xe64
+#define STMSPOVERRIDER                 0xe68
+#define STMSPMOVERRIDER                        0xe6c
+#define STMSPTRIGCSR                   0xe70
+#define STMTCSR                                0xe80
+#define STMTSSTIMR                     0xe84
+#define STMTSFREQR                     0xe8c
+#define STMSYNCR                       0xe90
+#define STMAUXCR                       0xe94
+#define STMSPFEAT1R                    0xea0
+#define STMSPFEAT2R                    0xea4
+#define STMSPFEAT3R                    0xea8
+#define STMITTRIGGER                   0xee8
+#define STMITATBDATA0                  0xeec
+#define STMITATBCTR2                   0xef0
+#define STMITATBID                     0xef4
+#define STMITATBCTR0                   0xef8
+
+#define STM_32_CHANNEL                 32
+#define BYTES_PER_CHANNEL              256
+#define STM_TRACE_BUF_SIZE             4096
+#define STM_SW_MASTER_END              127
+
+/* Register bit definition */
+#define STMTCSR_BUSY_BIT               23
+/* Reserve the first 10 channels for kernel usage */
+#define STM_CHANNEL_OFFSET             0
+
+enum stm_pkt_type {
+       STM_PKT_TYPE_DATA       = 0x98,
+       STM_PKT_TYPE_FLAG       = 0xE8,
+       STM_PKT_TYPE_TRIG       = 0xF8,
+};
+
+#define stm_channel_addr(drvdata, ch)  (drvdata->chs.base +    \
+                                       (ch * BYTES_PER_CHANNEL))
+#define stm_channel_off(type, opts)    (type & ~opts)
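
Putting the two macros together (sketch only; 'drvdata', 'ch', 'data'
and 'options' are assumed to be in scope):

	/* Address of the DATA packet window for channel 'ch' */
	void __iomem *ch_addr = stm_channel_addr(drvdata, ch) +
				stm_channel_off(STM_PKT_TYPE_DATA, options);

	writel_relaxed(data, ch_addr);	/* emit one data packet */
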
+
+static int boot_nr_channel;
+
+/*
+ * Not really modular but using module_param is the easiest way to
+ * remain consistent with existing use cases for now.
+ */
+module_param_named(
+       boot_nr_channel, boot_nr_channel, int, S_IRUGO
+);
+
+/**
+ * struct channel_space - central management entity for extended ports
+ * @base:              memory mapped base address where channels start.
+ * @guaranteed:                is the channel delivery guaranteed.
+ */
+struct channel_space {
+       void __iomem            *base;
+       unsigned long           *guaranteed;
+};
+
+/**
+ * struct stm_drvdata - specifics associated to an STM component
+ * @base:              memory mapped base address for this component.
+ * @dev:               the device entity associated to this component.
+ * @atclk:             optional clock for the core parts of the STM.
+ * @csdev:             component vitals needed by the framework.
+ * @spinlock:          only one at a time pls.
+ * @chs:               the channels associated with this STM.
+ * @stm:               structure associated with the generic STM interface.
+ * @mode:              this tracer's mode, i.e. sysFS or disabled.
+ * @traceid:           value of the current ID for this component.
+ * @write_bytes:       maximum bytes this STM can write at a time.
+ * @stmsper:           settings for register STMSPER.
+ * @stmspscr:          settings for register STMSPSCR.
+ * @numsp:             the total number of stimulus ports supported by this STM.
+ * @stmheer:           settings for register STMHEER.
+ * @stmheter:          settings for register STMHETER.
+ * @stmhebsr:          settings for register STMHEBSR.
+ */
+struct stm_drvdata {
+       void __iomem            *base;
+       struct device           *dev;
+       struct clk              *atclk;
+       struct coresight_device *csdev;
+       spinlock_t              spinlock;
+       struct channel_space    chs;
+       struct stm_data         stm;
+       local_t                 mode;
+       u8                      traceid;
+       u32                     write_bytes;
+       u32                     stmsper;
+       u32                     stmspscr;
+       u32                     numsp;
+       u32                     stmheer;
+       u32                     stmheter;
+       u32                     stmhebsr;
+};
+
+static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       writel_relaxed(drvdata->stmhebsr, drvdata->base + STMHEBSR);
+       writel_relaxed(drvdata->stmheter, drvdata->base + STMHETER);
+       writel_relaxed(drvdata->stmheer, drvdata->base + STMHEER);
+       writel_relaxed(0x01 |   /* Enable HW event tracing */
+                      0x04,    /* Error detection on event tracing */
+                      drvdata->base + STMHEMCR);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void stm_port_enable_hw(struct stm_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+       /* ATB trigger enable on direct writes to TRIG locations */
+       writel_relaxed(0x10,
+                      drvdata->base + STMSPTRIGCSR);
+       writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
+       writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void stm_enable_hw(struct stm_drvdata *drvdata)
+{
+       if (drvdata->stmheer)
+               stm_hwevent_enable_hw(drvdata);
+
+       stm_port_enable_hw(drvdata);
+
+       CS_UNLOCK(drvdata->base);
+
+       /* 4096 bytes between synchronisation packets */
+       writel_relaxed(0xFFF, drvdata->base + STMSYNCR);
+       writel_relaxed((drvdata->traceid << 16 | /* trace id */
+                       0x02 |                   /* timestamp enable */
+                       0x01),                   /* global STM enable */
+                       drvdata->base + STMTCSR);
+
+       CS_LOCK(drvdata->base);
+}
+
+static int stm_enable(struct coresight_device *csdev,
+                     struct perf_event_attr *attr, u32 mode)
+{
+       u32 val;
+       struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       if (mode != CS_MODE_SYSFS)
+               return -EINVAL;
+
+       val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
+
+       /* Someone is already using the tracer */
+       if (val)
+               return -EBUSY;
+
+       pm_runtime_get_sync(drvdata->dev);
+
+       spin_lock(&drvdata->spinlock);
+       stm_enable_hw(drvdata);
+       spin_unlock(&drvdata->spinlock);
+
+       dev_info(drvdata->dev, "STM tracing enabled\n");
+       return 0;
+}
+
+static void stm_hwevent_disable_hw(struct stm_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       writel_relaxed(0x0, drvdata->base + STMHEMCR);
+       writel_relaxed(0x0, drvdata->base + STMHEER);
+       writel_relaxed(0x0, drvdata->base + STMHETER);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void stm_port_disable_hw(struct stm_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       writel_relaxed(0x0, drvdata->base + STMSPER);
+       writel_relaxed(0x0, drvdata->base + STMSPTRIGCSR);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void stm_disable_hw(struct stm_drvdata *drvdata)
+{
+       u32 val;
+
+       CS_UNLOCK(drvdata->base);
+
+       val = readl_relaxed(drvdata->base + STMTCSR);
+       val &= ~0x1; /* clear global STM enable [0] */
+       writel_relaxed(val, drvdata->base + STMTCSR);
+
+       CS_LOCK(drvdata->base);
+
+       stm_port_disable_hw(drvdata);
+       if (drvdata->stmheer)
+               stm_hwevent_disable_hw(drvdata);
+}
+
+static void stm_disable(struct coresight_device *csdev)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       /*
+        * For as long as the tracer isn't disabled, another entity can't
+        * change its status.  As such we can read the status here without
+        * fearing it will change under us.
+        */
+       if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+               spin_lock(&drvdata->spinlock);
+               stm_disable_hw(drvdata);
+               spin_unlock(&drvdata->spinlock);
+
+               /* Wait until the engine has completely stopped */
+               coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
+
+               pm_runtime_put(drvdata->dev);
+
+               local_set(&drvdata->mode, CS_MODE_DISABLED);
+               dev_info(drvdata->dev, "STM tracing disabled\n");
+       }
+}
+
+static int stm_trace_id(struct coresight_device *csdev)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       return drvdata->traceid;
+}
+
+static const struct coresight_ops_source stm_source_ops = {
+       .trace_id       = stm_trace_id,
+       .enable         = stm_enable,
+       .disable        = stm_disable,
+};
+
+static const struct coresight_ops stm_cs_ops = {
+       .source_ops     = &stm_source_ops,
+};
+
+static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes)
+{
+       return ((unsigned long)addr & (write_bytes - 1));
+}
+
+static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes)
+{
+       u8 payload[8];
+
+       if (stm_addr_unaligned(data, write_bytes)) {
+               memcpy(payload, data, size);
+               data = payload;
+       }
+
+       /* now we are 64bit/32bit aligned */
+       switch (size) {
+#ifdef CONFIG_64BIT
+       case 8:
+               writeq_relaxed(*(u64 *)data, addr);
+               break;
+#endif
+       case 4:
+               writel_relaxed(*(u32 *)data, addr);
+               break;
+       case 2:
+               writew_relaxed(*(u16 *)data, addr);
+               break;
+       case 1:
+               writeb_relaxed(*(u8 *)data, addr);
+               break;
+       default:
+               break;
+       }
+}
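+/*
+ * Illustrative use of the helper above (hypothetical values): a 2-byte
+ * payload sitting at an unaligned address may first be bounced through
+ * the aligned scratch buffer, then emitted as a single writew_relaxed():
+ *
+ *     u8 msg[2] = { 0xde, 0xad };
+ *     stm_send(ch_addr, msg, 2, drvdata->write_bytes);
+ */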
+
+static int stm_generic_link(struct stm_data *stm_data,
+                           unsigned int master,  unsigned int channel)
+{
+       struct stm_drvdata *drvdata = container_of(stm_data,
+                                                  struct stm_drvdata, stm);
+       if (!drvdata || !drvdata->csdev)
+               return -EINVAL;
+
+       return coresight_enable(drvdata->csdev);
+}
+
+static void stm_generic_unlink(struct stm_data *stm_data,
+                              unsigned int master,  unsigned int channel)
+{
+       struct stm_drvdata *drvdata = container_of(stm_data,
+                                                  struct stm_drvdata, stm);
+       if (!drvdata || !drvdata->csdev)
+               return;
+
+       stm_disable(drvdata->csdev);
+}
+
+static long stm_generic_set_options(struct stm_data *stm_data,
+                                   unsigned int master,
+                                   unsigned int channel,
+                                   unsigned int nr_chans,
+                                   unsigned long options)
+{
+       struct stm_drvdata *drvdata = container_of(stm_data,
+                                                  struct stm_drvdata, stm);
+       if (!(drvdata && local_read(&drvdata->mode)))
+               return -EINVAL;
+
+       if (channel >= drvdata->numsp)
+               return -EINVAL;
+
+       switch (options) {
+       case STM_OPTION_GUARANTEED:
+               set_bit(channel, drvdata->chs.guaranteed);
+               break;
+
+       case STM_OPTION_INVARIANT:
+               clear_bit(channel, drvdata->chs.guaranteed);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static ssize_t stm_generic_packet(struct stm_data *stm_data,
+                                 unsigned int master,
+                                 unsigned int channel,
+                                 unsigned int packet,
+                                 unsigned int flags,
+                                 unsigned int size,
+                                 const unsigned char *payload)
+{
+       unsigned long ch_addr;
+       struct stm_drvdata *drvdata = container_of(stm_data,
+                                                  struct stm_drvdata, stm);
+
+       if (!(drvdata && local_read(&drvdata->mode)))
+               return 0;
+
+       if (channel >= drvdata->numsp)
+               return 0;
+
+       ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
+
+       flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0;
+       flags |= test_bit(channel, drvdata->chs.guaranteed) ?
+                          STM_FLAG_GUARANTEED : 0;
+
+       if (size > drvdata->write_bytes)
+               size = drvdata->write_bytes;
+       else
+               size = rounddown_pow_of_two(size);
+
+       switch (packet) {
+       case STP_PACKET_FLAG:
+               ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags);
+
+               /*
+                * The generic STM core sets a size of '0' on flag packets.
+                * As such send a flag packet of size '1' and tell the
+                * core we did so.
+                */
+               stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes);
+               size = 1;
+               break;
+
+       case STP_PACKET_DATA:
+               ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags);
+               stm_send((void *)ch_addr, payload, size,
+                               drvdata->write_bytes);
+               break;
+
+       default:
+               return -ENOTSUPP;
+       }
+
+       return size;
+}
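+/*
+ * For context (a sketch, not part of this driver): the generic STM core
+ * invokes ->packet() on behalf of writers of the stm character device,
+ * so once an stp policy is in place trace data can be generated straight
+ * from user space (device name assumed):
+ *
+ *     echo "hello" > /dev/20100000.stm
+ */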
+
+static ssize_t hwevent_enable_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val = drvdata->stmheer;
+
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t hwevent_enable_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+       int ret = 0;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return -EINVAL;
+
+       drvdata->stmheer = val;
+       /* HW event enable and trigger go hand in hand */
+       drvdata->stmheter = val;
+
+       return size;
+}
+static DEVICE_ATTR_RW(hwevent_enable);
+
+static ssize_t hwevent_select_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val = drvdata->stmhebsr;
+
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t hwevent_select_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+       int ret = 0;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return -EINVAL;
+
+       drvdata->stmhebsr = val;
+
+       return size;
+}
+static DEVICE_ATTR_RW(hwevent_select);
+
+static ssize_t port_select_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+
+       if (!local_read(&drvdata->mode)) {
+               val = drvdata->stmspscr;
+       } else {
+               spin_lock(&drvdata->spinlock);
+               val = readl_relaxed(drvdata->base + STMSPSCR);
+               spin_unlock(&drvdata->spinlock);
+       }
+
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t port_select_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t size)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val, stmsper;
+       int ret = 0;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->stmspscr = val;
+
+       if (local_read(&drvdata->mode)) {
+               CS_UNLOCK(drvdata->base);
+               /* Process as per ARM's TRM recommendation */
+               stmsper = readl_relaxed(drvdata->base + STMSPER);
+               writel_relaxed(0x0, drvdata->base + STMSPER);
+               writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
+               writel_relaxed(stmsper, drvdata->base + STMSPER);
+               CS_LOCK(drvdata->base);
+       }
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(port_select);
+
+static ssize_t port_enable_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+
+       if (!local_read(&drvdata->mode)) {
+               val = drvdata->stmsper;
+       } else {
+               spin_lock(&drvdata->spinlock);
+               val = readl_relaxed(drvdata->base + STMSPER);
+               spin_unlock(&drvdata->spinlock);
+       }
+
+       return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t port_enable_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t size)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+       int ret = 0;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->stmsper = val;
+
+       if (local_read(&drvdata->mode)) {
+               CS_UNLOCK(drvdata->base);
+               writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
+               CS_LOCK(drvdata->base);
+       }
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(port_enable);
+
+static ssize_t traceid_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->traceid;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       /* traceid field is 7 bits wide on STM32 */
+       drvdata->traceid = val & 0x7f;
+       return size;
+}
+static DEVICE_ATTR_RW(traceid);
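+/*
+ * Example sysfs interaction with the attribute above (path assumed):
+ *
+ *     # cat /sys/bus/coresight/devices/20100000.stm/traceid
+ *     0x1
+ *     # echo 0x2 > /sys/bus/coresight/devices/20100000.stm/traceid
+ */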
+
+#define coresight_stm_simple_func(name, offset)        \
+       coresight_simple_func(struct stm_drvdata, name, offset)
+
+coresight_stm_simple_func(tcsr, STMTCSR);
+coresight_stm_simple_func(tsfreqr, STMTSFREQR);
+coresight_stm_simple_func(syncr, STMSYNCR);
+coresight_stm_simple_func(sper, STMSPER);
+coresight_stm_simple_func(spter, STMSPTER);
+coresight_stm_simple_func(privmaskr, STMPRIVMASKR);
+coresight_stm_simple_func(spscr, STMSPSCR);
+coresight_stm_simple_func(spmscr, STMSPMSCR);
+coresight_stm_simple_func(spfeat1r, STMSPFEAT1R);
+coresight_stm_simple_func(spfeat2r, STMSPFEAT2R);
+coresight_stm_simple_func(spfeat3r, STMSPFEAT3R);
+coresight_stm_simple_func(devid, CORESIGHT_DEVID);
+
+static struct attribute *coresight_stm_attrs[] = {
+       &dev_attr_hwevent_enable.attr,
+       &dev_attr_hwevent_select.attr,
+       &dev_attr_port_enable.attr,
+       &dev_attr_port_select.attr,
+       &dev_attr_traceid.attr,
+       NULL,
+};
+
+static struct attribute *coresight_stm_mgmt_attrs[] = {
+       &dev_attr_tcsr.attr,
+       &dev_attr_tsfreqr.attr,
+       &dev_attr_syncr.attr,
+       &dev_attr_sper.attr,
+       &dev_attr_spter.attr,
+       &dev_attr_privmaskr.attr,
+       &dev_attr_spscr.attr,
+       &dev_attr_spmscr.attr,
+       &dev_attr_spfeat1r.attr,
+       &dev_attr_spfeat2r.attr,
+       &dev_attr_spfeat3r.attr,
+       &dev_attr_devid.attr,
+       NULL,
+};
+
+static const struct attribute_group coresight_stm_group = {
+       .attrs = coresight_stm_attrs,
+};
+
+static const struct attribute_group coresight_stm_mgmt_group = {
+       .attrs = coresight_stm_mgmt_attrs,
+       .name = "mgmt",
+};
+
+static const struct attribute_group *coresight_stm_groups[] = {
+       &coresight_stm_group,
+       &coresight_stm_mgmt_group,
+       NULL,
+};
+
+static int stm_get_resource_byname(struct device_node *np,
+                                  char *ch_base, struct resource *res)
+{
+       const char *name = NULL;
+       int index = 0, found = 0;
+
+       while (!of_property_read_string_index(np, "reg-names", index, &name)) {
+               if (strcmp(ch_base, name)) {
+                       index++;
+                       continue;
+               }
+
+               /* We have a match and @index is where it's at */
+               found = 1;
+               break;
+       }
+
+       if (!found)
+               return -EINVAL;
+
+       return of_address_to_resource(np, index, res);
+}
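+/*
+ * A minimal DT sketch matching the lookup above (addresses and sizes
+ * hypothetical, names as expected by this driver):
+ *
+ *     stm@20100000 {
+ *             compatible = "arm,coresight-stm", "arm,primecell";
+ *             reg = <0x20100000 0x1000>, <0x28000000 0x180000>;
+ *             reg-names = "stm-base", "stm-stimulus-base";
+ *     };
+ */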
+
+static u32 stm_fundamental_data_size(struct stm_drvdata *drvdata)
+{
+       u32 stmspfeat2r;
+
+       if (!IS_ENABLED(CONFIG_64BIT))
+               return 4;
+
+       stmspfeat2r = readl_relaxed(drvdata->base + STMSPFEAT2R);
+
+       /*
+        * bit[15:12] represents the fundamental data size
+        * 0 - 32-bit data
+        * 1 - 64-bit data
+        */
+       return BMVAL(stmspfeat2r, 12, 15) ? 8 : 4;
+}
+
+static u32 stm_num_stimulus_port(struct stm_drvdata *drvdata)
+{
+       u32 numsp;
+
+       numsp = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+       /*
+        * NUMSP in STMDEVID is 17 bits wide and if equal to 0x0,
+        * 32 stimulus ports are supported.
+        */
+       numsp &= 0x1ffff;
+       if (!numsp)
+               numsp = STM_32_CHANNEL;
+       return numsp;
+}
+
+static void stm_init_default_data(struct stm_drvdata *drvdata)
+{
+       /* Don't use port selection */
+       drvdata->stmspscr = 0x0;
+       /*
+        * Enable all channels regardless of their number.  When port
+        * selection isn't used (see above) STMSPER applies to all
+        * 32 channel groups available, hence setting all 32 bits to 1.
+        */
+       drvdata->stmsper = ~0x0;
+
+       /*
+        * The trace ID value for *ETM* tracers starts at CPU_ID * 2 + 0x10 and
+        * anything equal to or higher than 0x70 is reserved.  Since 0x00 is
+        * also reserved, the STM trace ID needs to be higher than 0x00 and
+        * lower than 0x10.
+        */
+       drvdata->traceid = 0x1;
+
+       /* Set invariant transaction timing on all channels */
+       bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp);
+}
+
+static void stm_init_generic_data(struct stm_drvdata *drvdata)
+{
+       drvdata->stm.name = dev_name(drvdata->dev);
+
+       /*
+        * MasterIDs are assigned at the HW design phase. As such the core
+        * uses a single master for interaction with this device.
+        */
+       drvdata->stm.sw_start = 1;
+       drvdata->stm.sw_end = 1;
+       drvdata->stm.hw_override = true;
+       drvdata->stm.sw_nchannels = drvdata->numsp;
+       drvdata->stm.packet = stm_generic_packet;
+       drvdata->stm.link = stm_generic_link;
+       drvdata->stm.unlink = stm_generic_unlink;
+       drvdata->stm.set_options = stm_generic_set_options;
+}
+
+static int stm_probe(struct amba_device *adev, const struct amba_id *id)
+{
+       int ret;
+       void __iomem *base;
+       unsigned long *guaranteed;
+       struct device *dev = &adev->dev;
+       struct coresight_platform_data *pdata = NULL;
+       struct stm_drvdata *drvdata;
+       struct resource *res = &adev->res;
+       struct resource ch_res;
+       size_t res_size, bitmap_size;
+       struct coresight_desc *desc;
+       struct device_node *np = adev->dev.of_node;
+
+       if (np) {
+               pdata = of_get_coresight_platform_data(dev, np);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+               adev->dev.platform_data = pdata;
+       }
+       drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       drvdata->dev = &adev->dev;
+       drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+       if (!IS_ERR(drvdata->atclk)) {
+               ret = clk_prepare_enable(drvdata->atclk);
+               if (ret)
+                       return ret;
+       }
+       dev_set_drvdata(dev, drvdata);
+
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+       drvdata->base = base;
+
+       ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res);
+       if (ret)
+               return ret;
+
+       base = devm_ioremap_resource(dev, &ch_res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+       drvdata->chs.base = base;
+
+       drvdata->write_bytes = stm_fundamental_data_size(drvdata);
+
+       if (boot_nr_channel) {
+               drvdata->numsp = boot_nr_channel;
+               res_size = min((resource_size_t)(boot_nr_channel *
+                                 BYTES_PER_CHANNEL), resource_size(res));
+       } else {
+               drvdata->numsp = stm_num_stimulus_port(drvdata);
+               res_size = min((resource_size_t)(drvdata->numsp *
+                                BYTES_PER_CHANNEL), resource_size(res));
+       }
+       bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
+
+       guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
+       if (!guaranteed)
+               return -ENOMEM;
+       drvdata->chs.guaranteed = guaranteed;
+
+       spin_lock_init(&drvdata->spinlock);
+
+       stm_init_default_data(drvdata);
+       stm_init_generic_data(drvdata);
+
+       if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) {
+               dev_info(dev,
+                        "stm_register_device failed, probe deferred\n");
+               return -EPROBE_DEFER;
+       }
+
+       desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+       if (!desc) {
+               ret = -ENOMEM;
+               goto stm_unregister;
+       }
+
+       desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+       desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
+       desc->ops = &stm_cs_ops;
+       desc->pdata = pdata;
+       desc->dev = dev;
+       desc->groups = coresight_stm_groups;
+       drvdata->csdev = coresight_register(desc);
+       if (IS_ERR(drvdata->csdev)) {
+               ret = PTR_ERR(drvdata->csdev);
+               goto stm_unregister;
+       }
+
+       pm_runtime_put(&adev->dev);
+
+       dev_info(dev, "%s initialized\n", (char *)id->data);
+       return 0;
+
+stm_unregister:
+       stm_unregister_device(&drvdata->stm);
+       return ret;
+}
+
+#ifdef CONFIG_PM
+static int stm_runtime_suspend(struct device *dev)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev);
+
+       if (drvdata && !IS_ERR(drvdata->atclk))
+               clk_disable_unprepare(drvdata->atclk);
+
+       return 0;
+}
+
+static int stm_runtime_resume(struct device *dev)
+{
+       struct stm_drvdata *drvdata = dev_get_drvdata(dev);
+
+       if (drvdata && !IS_ERR(drvdata->atclk))
+               clk_prepare_enable(drvdata->atclk);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm_dev_pm_ops = {
+       SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
+};
+
+static struct amba_id stm_ids[] = {
+       {
+               .id     = 0x0003b962,
+               .mask   = 0x0003ffff,
+               .data   = "STM32",
+       },
+       { 0, 0},
+};
+
+static struct amba_driver stm_driver = {
+       .drv = {
+               .name   = "coresight-stm",
+               .owner  = THIS_MODULE,
+               .pm     = &stm_dev_pm_ops,
+               .suppress_bind_attrs = true,
+       },
+       .probe          = stm_probe,
+       .id_table       = stm_ids,
+};
+
+builtin_amba_driver(stm_driver);
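+/*
+ * Typical sysFS session once a sink has been enabled further down the
+ * trace path (device names assumed):
+ *
+ *     # echo 1 > /sys/bus/coresight/devices/20010000.etf/enable_sink
+ *     # echo 1 > /sys/bus/coresight/devices/20100000.stm/enable_source
+ */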
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
new file mode 100644 (file)
index 0000000..466af86
--- /dev/null
@@ -0,0 +1,604 @@
+/*
+ * Copyright(C) 2016 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/coresight.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       /* Wait for TMCSReady bit to be set */
+       tmc_wait_for_tmcready(drvdata);
+
+       writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
+                      TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
+                      TMC_FFCR_TRIGON_TRIGIN,
+                      drvdata->base + TMC_FFCR);
+
+       writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+       tmc_enable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
+{
+       char *bufp;
+       u32 read_data;
+       int i;
+
+       bufp = drvdata->buf;
+       while (1) {
+               for (i = 0; i < drvdata->memwidth; i++) {
+                       read_data = readl_relaxed(drvdata->base + TMC_RRD);
+                       if (read_data == 0xFFFFFFFF)
+                               return;
+                       memcpy(bufp, &read_data, 4);
+                       bufp += 4;
+               }
+       }
+}
+
+static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       tmc_flush_and_stop(drvdata);
+       /*
+        * When operating in sysFS mode the content of the buffer needs to be
+        * read before the TMC is disabled.
+        */
+       if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+               tmc_etb_dump_hw(drvdata);
+       tmc_disable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       /* Wait for TMCSReady bit to be set */
+       tmc_wait_for_tmcready(drvdata);
+
+       writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
+       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
+                      drvdata->base + TMC_FFCR);
+       writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
+       tmc_enable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       tmc_flush_and_stop(drvdata);
+       tmc_disable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
+{
+       int ret = 0;
+       bool used = false;
+       char *buf = NULL;
+       long val;
+       unsigned long flags;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       /* This shouldn't be happening */
+       if (WARN_ON(mode != CS_MODE_SYSFS))
+               return -EINVAL;
+
+       /*
+        * If we don't have a buffer release the lock and allocate memory.
+        * Otherwise keep the lock and move along.
+        */
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (!drvdata->buf) {
+               spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+               /* Allocating the memory here while outside of the spinlock */
+               buf = kzalloc(drvdata->size, GFP_KERNEL);
+               if (!buf)
+                       return -ENOMEM;
+
+               /* Let's try again */
+               spin_lock_irqsave(&drvdata->spinlock, flags);
+       }
+
+       if (drvdata->reading) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       val = local_xchg(&drvdata->mode, mode);
+       /*
+        * In sysFS mode we can have multiple writers per sink.  Since this
+        * sink is already enabled no memory is needed and the HW need not be
+        * touched.
+        */
+       if (val == CS_MODE_SYSFS)
+               goto out;
+
+       /*
+        * If drvdata::buf isn't NULL, memory was allocated for a previous
+        * trace run but wasn't read.  If so simply zero-out the memory.
+        * Otherwise use the memory allocated above.
+        *
+        * The memory is freed when users read the buffer using the
+        * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
+        * details.
+        */
+       if (drvdata->buf) {
+               memset(drvdata->buf, 0, drvdata->size);
+       } else {
+               used = true;
+               drvdata->buf = buf;
+       }
+
+       tmc_etb_enable_hw(drvdata);
+out:
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       /* Free memory outside the spinlock if need be */
+       if (!used && buf)
+               kfree(buf);
+
+       if (!ret)
+               dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+
+       return ret;
+}
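+/*
+ * The allocate-outside-the-lock dance above follows a generic pattern;
+ * a minimal sketch of it, independent of this driver (error handling
+ * elided):
+ *
+ *     void *new = NULL;
+ *
+ *     spin_lock(&lock);
+ *     if (!res) {
+ *             spin_unlock(&lock);
+ *             new = kzalloc(size, GFP_KERNEL);        (may sleep)
+ *             spin_lock(&lock);
+ *             if (!res) {
+ *                     res = new;
+ *                     new = NULL;
+ *             }
+ *     }
+ *     spin_unlock(&lock);
+ *     kfree(new);                             (kfree(NULL) is a no-op)
+ */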
+
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
+{
+       int ret = 0;
+       long val;
+       unsigned long flags;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       /* This shouldn't be happening */
+       if (WARN_ON(mode != CS_MODE_PERF))
+               return -EINVAL;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       val = local_xchg(&drvdata->mode, mode);
+       /*
+        * In Perf mode there can be only one writer per sink.  There
+        * is also no need to continue if the ETB/ETR is already operated
+        * from sysFS.
+        */
+       if (val != CS_MODE_DISABLED) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       tmc_etb_enable_hw(drvdata);
+out:
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       return ret;
+}
+
+static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
+{
+       switch (mode) {
+       case CS_MODE_SYSFS:
+               return tmc_enable_etf_sink_sysfs(csdev, mode);
+       case CS_MODE_PERF:
+               return tmc_enable_etf_sink_perf(csdev, mode);
+       }
+
+       /* We shouldn't be here */
+       return -EINVAL;
+}
+
+static void tmc_disable_etf_sink(struct coresight_device *csdev)
+{
+       long val;
+       unsigned long flags;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading) {
+               spin_unlock_irqrestore(&drvdata->spinlock, flags);
+               return;
+       }
+
+       val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
+       /* Disable the TMC only if it was enabled */
+       if (val != CS_MODE_DISABLED)
+               tmc_etb_disable_hw(drvdata);
+
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
+}
+
+static int tmc_enable_etf_link(struct coresight_device *csdev,
+                              int inport, int outport)
+{
+       unsigned long flags;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading) {
+               spin_unlock_irqrestore(&drvdata->spinlock, flags);
+               return -EBUSY;
+       }
+
+       tmc_etf_enable_hw(drvdata);
+       local_set(&drvdata->mode, CS_MODE_SYSFS);
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "TMC-ETF enabled\n");
+       return 0;
+}
+
+static void tmc_disable_etf_link(struct coresight_device *csdev,
+                                int inport, int outport)
+{
+       unsigned long flags;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading) {
+               spin_unlock_irqrestore(&drvdata->spinlock, flags);
+               return;
+       }
+
+       tmc_etf_disable_hw(drvdata);
+       local_set(&drvdata->mode, CS_MODE_DISABLED);
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "TMC disabled\n");
+}
+
+static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
+                                 void **pages, int nr_pages, bool overwrite)
+{
+       int node;
+       struct cs_buffers *buf;
+
+       if (cpu == -1)
+               cpu = smp_processor_id();
+       node = cpu_to_node(cpu);
+
+       /* Allocate memory structure for interaction with Perf */
+       buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+       if (!buf)
+               return NULL;
+
+       buf->snapshot = overwrite;
+       buf->nr_pages = nr_pages;
+       buf->data_pages = pages;
+
+       return buf;
+}
+
+static void tmc_free_etf_buffer(void *config)
+{
+       struct cs_buffers *buf = config;
+
+       kfree(buf);
+}
+
+static int tmc_set_etf_buffer(struct coresight_device *csdev,
+                             struct perf_output_handle *handle,
+                             void *sink_config)
+{
+       int ret = 0;
+       unsigned long head;
+       struct cs_buffers *buf = sink_config;
+
+       /* wrap head around to the amount of space we have */
+       head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+
+       /* find the page to write to */
+       buf->cur = head / PAGE_SIZE;
+
+       /* and offset within that page */
+       buf->offset = head % PAGE_SIZE;
+
+       local_set(&buf->data_size, 0);
+
+       return ret;
+}
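+/*
+ * Worked example for the head arithmetic above (values hypothetical):
+ * with nr_pages = 4 and 4K pages the ring spans 16K, so handle->head =
+ * 0x5400 wraps to head = 0x5400 & 0x3fff = 0x1400, giving buf->cur = 1
+ * and buf->offset = 0x400.
+ */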
+
+static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
+                                         struct perf_output_handle *handle,
+                                         void *sink_config, bool *lost)
+{
+       long size = 0;
+       struct cs_buffers *buf = sink_config;
+
+       if (buf) {
+               /*
+                * In snapshot mode ->data_size holds the new address of the
+                * ring buffer's head.  The size itself is the whole address
+                * range since we want the latest information.
+                */
+               if (buf->snapshot)
+                       handle->head = local_xchg(&buf->data_size,
+                                                 buf->nr_pages << PAGE_SHIFT);
+               /*
+                * Tell the tracer PMU how much we got in this run and if
+                * something went wrong along the way.  Nobody else can use
+                * this cs_buffers instance until we are done.  As such
+                * resetting parameters here and squaring off with the ring
+                * buffer API in the tracer PMU is fine.
+                */
+               *lost = !!local_xchg(&buf->lost, 0);
+               size = local_xchg(&buf->data_size, 0);
+       }
+
+       return size;
+}
+
+static void tmc_update_etf_buffer(struct coresight_device *csdev,
+                                 struct perf_output_handle *handle,
+                                 void *sink_config)
+{
+       int i, cur;
+       u32 *buf_ptr;
+       u32 read_ptr, write_ptr;
+       u32 status, to_read;
+       unsigned long offset;
+       struct cs_buffers *buf = sink_config;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       if (!buf)
+               return;
+
+       /* This shouldn't happen */
+       if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
+               return;
+
+       CS_UNLOCK(drvdata->base);
+
+       tmc_flush_and_stop(drvdata);
+
+       read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
+       write_ptr = readl_relaxed(drvdata->base + TMC_RWP);
+
+       /*
+        * Get a hold of the status register and see if a wrap around
+        * has occurred.  If so adjust things accordingly.
+        */
+       status = readl_relaxed(drvdata->base + TMC_STS);
+       if (status & TMC_STS_FULL) {
+               local_inc(&buf->lost);
+               to_read = drvdata->size;
+       } else {
+               to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
+       }
+
+       /*
+        * The TMC RAM buffer may be bigger than the space available in the
+        * perf ring buffer (handle->size).  If so advance the RRP so that we
+        * get the latest trace data.
+        */
+       if (to_read > handle->size) {
+               u32 mask = 0;
+
+               /*
+                * The value written to RRP must be byte-address aligned to
+                * the width of the trace memory databus _and_ to a frame
+                * boundary (16 byte), whichever is the biggest. For example,
+                * for 32-bit, 64-bit and 128-bit wide trace memory, the four
+                * LSBs must be 0s. For 256-bit wide trace memory, the five
+                * LSBs must be 0s.
+                */
+               switch (drvdata->memwidth) {
+               case TMC_MEM_INTF_WIDTH_32BITS:
+               case TMC_MEM_INTF_WIDTH_64BITS:
+               case TMC_MEM_INTF_WIDTH_128BITS:
+                       mask = GENMASK(31, 5);
+                       break;
+               case TMC_MEM_INTF_WIDTH_256BITS:
+                       mask = GENMASK(31, 6);
+                       break;
+               }
+
+               /*
+                * Make sure the new size is aligned in accordance with the
+                * requirement explained above.
+                */
+               to_read = handle->size & mask;
+               /* Move the RAM read pointer up */
+               read_ptr = (write_ptr + drvdata->size) - to_read;
+               /* Make sure we are still within our limits */
+               if (read_ptr > (drvdata->size - 1))
+                       read_ptr -= drvdata->size;
+               /* Tell the HW */
+               writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
+               local_inc(&buf->lost);
+       }
+
+       cur = buf->cur;
+       offset = buf->offset;
+
+       /* read the trace data out, one 32-bit word at a time */
+       for (i = 0; i < to_read; i += 4) {
+               buf_ptr = buf->data_pages[cur] + offset;
+               *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
+
+               offset += 4;
+               if (offset >= PAGE_SIZE) {
+                       offset = 0;
+                       cur++;
+                       /* wrap around at the end of the buffer */
+                       cur &= buf->nr_pages - 1;
+               }
+       }
+
+       /*
+        * In snapshot mode all we have to do is communicate to
+        * perf_aux_output_end() the address of the current head.  In full
+        * trace mode the same function expects a size to move rb->aux_head
+        * forward.
+        */
+       if (buf->snapshot)
+               local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
+       else
+               local_add(to_read, &buf->data_size);
+
+       CS_LOCK(drvdata->base);
+}
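+/*
+ * Worked example for the RRP adjustment above (numbers hypothetical):
+ * on a 64-bit wide TMC with drvdata->size = 0x1000 and handle->size =
+ * 0x123, to_read becomes 0x123 & GENMASK(31, 5) = 0x120 and the read
+ * pointer is backed up to write_ptr + 0x1000 - 0x120, wrapping it back
+ * into the buffer if it overshoots the end.
+ */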
+
+static const struct coresight_ops_sink tmc_etf_sink_ops = {
+       .enable         = tmc_enable_etf_sink,
+       .disable        = tmc_disable_etf_sink,
+       .alloc_buffer   = tmc_alloc_etf_buffer,
+       .free_buffer    = tmc_free_etf_buffer,
+       .set_buffer     = tmc_set_etf_buffer,
+       .reset_buffer   = tmc_reset_etf_buffer,
+       .update_buffer  = tmc_update_etf_buffer,
+};
+
+static const struct coresight_ops_link tmc_etf_link_ops = {
+       .enable         = tmc_enable_etf_link,
+       .disable        = tmc_disable_etf_link,
+};
+
+const struct coresight_ops tmc_etb_cs_ops = {
+       .sink_ops       = &tmc_etf_sink_ops,
+};
+
+const struct coresight_ops tmc_etf_cs_ops = {
+       .sink_ops       = &tmc_etf_sink_ops,
+       .link_ops       = &tmc_etf_link_ops,
+};
+
+int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
+{
+       long val;
+       enum tmc_mode mode;
+       int ret = 0;
+       unsigned long flags;
+
+       /* config types are set at boot time and never change */
+       if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
+                        drvdata->config_type != TMC_CONFIG_TYPE_ETF))
+               return -EINVAL;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       if (drvdata->reading) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /* There is no point in reading a TMC in HW FIFO mode */
+       mode = readl_relaxed(drvdata->base + TMC_MODE);
+       if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       val = local_read(&drvdata->mode);
+       /* Don't interfere if operated from Perf */
+       if (val == CS_MODE_PERF) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* If drvdata::buf is NULL the trace data has been read already */
+       if (drvdata->buf == NULL) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Disable the TMC if need be */
+       if (val == CS_MODE_SYSFS)
+               tmc_etb_disable_hw(drvdata);
+
+       drvdata->reading = true;
+out:
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       return ret;
+}
+
+int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
+{
+       char *buf = NULL;
+       enum tmc_mode mode;
+       unsigned long flags;
+
+       /* config types are set at boot time and never change */
+       if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
+                        drvdata->config_type != TMC_CONFIG_TYPE_ETF))
+               return -EINVAL;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       /* There is no point in reading a TMC in HW FIFO mode */
+       mode = readl_relaxed(drvdata->base + TMC_MODE);
+       if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+               spin_unlock_irqrestore(&drvdata->spinlock, flags);
+               return -EINVAL;
+       }
+
+       /* Re-enable the TMC if need be */
+       if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+               /*
+                * The trace run will continue with the same allocated trace
+                * buffer. As such zero-out the buffer so that we don't end
+                * up with stale data.
+                *
+                * Since the tracer is still enabled drvdata::buf
+                * can't be NULL.
+                */
+               memset(drvdata->buf, 0, drvdata->size);
+               tmc_etb_enable_hw(drvdata);
+       } else {
+               /*
+                * The ETB/ETF is not tracing and the buffer was just read.
+                * As such prepare to free the trace buffer.
+                */
+               buf = drvdata->buf;
+               drvdata->buf = NULL;
+       }
+
+       drvdata->reading = false;
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       /*
+        * Free allocated memory outside of the spinlock.  There is no need
+        * to assert the validity of 'buf' since calling kfree(NULL) is safe.
+        */
+       kfree(buf);
+
+       return 0;
+}
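+/*
+ * The prepare/unprepare pair above backs the TMC misc device; a typical
+ * capture session from user space looks like this (names assumed):
+ *
+ *     # echo 1 > /sys/bus/coresight/devices/20010000.etf/enable_sink
+ *     ... run the trace session ...
+ *     # dd if=/dev/20010000.etf of=trace.bin
+ */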
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
new file mode 100644 (file)
index 0000000..847d1b5
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * Copyright(C) 2016 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/coresight.h>
+#include <linux/dma-mapping.h>
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+{
+       u32 axictl;
+
+       /* Zero out the memory to help with debug */
+       memset(drvdata->vaddr, 0, drvdata->size);
+
+       CS_UNLOCK(drvdata->base);
+
+       /* Wait for TMCSReady bit to be set */
+       tmc_wait_for_tmcready(drvdata);
+
+       writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
+       writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+
+       axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
+       axictl |= TMC_AXICTL_WR_BURST_16;
+       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+       axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
+       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+       axictl = (axictl &
+                 ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
+                 TMC_AXICTL_PROT_CTL_B1;
+       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+
+       writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
+       writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
+       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
+                      TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
+                      TMC_FFCR_TRIGON_TRIGIN,
+                      drvdata->base + TMC_FFCR);
+       writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+       tmc_enable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
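+/*
+ * Sketch of the programming above (values hypothetical): a 1MB DMA
+ * buffer gives TMC_RSZ = 0x100000 / 4 = 0x40000 32-bit words, and the
+ * AXICTL sequence selects 16-beat write bursts, contiguous (non
+ * scatter-gather) memory and, with only PROT_CTL_B1 set, non-secure
+ * accesses (the AxPROT meaning is an assumption from the AXI spec,
+ * not something this driver documents).
+ */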
+
+static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
+{
+       u32 rwp, val;
+
+       rwp = readl_relaxed(drvdata->base + TMC_RWP);
+       val = readl_relaxed(drvdata->base + TMC_STS);
+
+       /* If the buffer wrapped around, the oldest data is at the write pointer */
+       if (val & BIT(0))
+               drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
+       else
+               drvdata->buf = drvdata->vaddr;
+}
+
+static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       tmc_flush_and_stop(drvdata);
+       /*
+        * When operating in sysFS mode the content of the buffer needs to be
+        * read before the TMC is disabled.
+        */
+       if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+               tmc_etr_dump_hw(drvdata);
+       tmc_disable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
+{
+       int ret = 0;
+       bool used = false;
+       long val;
+       unsigned long flags;
+       void __iomem *vaddr = NULL;
+       dma_addr_t paddr;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       /* This shouldn't be happening */
+       if (WARN_ON(mode != CS_MODE_SYSFS))
+               return -EINVAL;
+
+       /*
+        * If we don't have a buffer release the lock and allocate memory.
+        * Otherwise keep the lock and move along.
+        */
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (!drvdata->vaddr) {
+               spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+               /*
+                * Contiguous memory can't be allocated while a spinlock is
+                * held.  As such allocate memory here and free it if a buffer
+                * has already been allocated (from a previous session).
+                */
+               vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
+                                          &paddr, GFP_KERNEL);
+               if (!vaddr)
+                       return -ENOMEM;
+
+               /* Let's try again */
+               spin_lock_irqsave(&drvdata->spinlock, flags);
+       }
+
+       if (drvdata->reading) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       val = local_xchg(&drvdata->mode, mode);
+       /*
+        * In sysFS mode we can have multiple writers per sink.  Since this
+        * sink is already enabled no memory is needed and the HW need not be
+        * touched.
+        */
+       if (val == CS_MODE_SYSFS)
+               goto out;
+
+       /*
+        * If drvdata::buf == NULL, use the memory allocated above.
+        * Otherwise a buffer still exists from a previous session, so
+        * simply use that.
+        */
+       if (drvdata->buf == NULL) {
+               used = true;
+               drvdata->vaddr = vaddr;
+               drvdata->paddr = paddr;
+               drvdata->buf = drvdata->vaddr;
+       }
+
+       memset(drvdata->vaddr, 0, drvdata->size);
+
+       tmc_etr_enable_hw(drvdata);
+out:
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       /* Free memory outside the spinlock if need be */
+       if (!used && vaddr)
+               dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+
+       if (!ret)
+               dev_info(drvdata->dev, "TMC-ETR enabled\n");
+
+       return ret;
+}
+
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
+{
+       int ret = 0;
+       long val;
+       unsigned long flags;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       /* This shouldn't be happening */
+       if (WARN_ON(mode != CS_MODE_PERF))
+               return -EINVAL;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       val = local_xchg(&drvdata->mode, mode);
+       /*
+        * In Perf mode there can be only one writer per sink.  There
+        * is also no need to continue if the ETR is already operated
+        * from sysFS.
+        */
+       if (val != CS_MODE_DISABLED) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       tmc_etr_enable_hw(drvdata);
+out:
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       return ret;
+}
+
+static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
+{
+       switch (mode) {
+       case CS_MODE_SYSFS:
+               return tmc_enable_etr_sink_sysfs(csdev, mode);
+       case CS_MODE_PERF:
+               return tmc_enable_etr_sink_perf(csdev, mode);
+       }
+
+       /* We shouldn't be here */
+       return -EINVAL;
+}
+
+static void tmc_disable_etr_sink(struct coresight_device *csdev)
+{
+       long val;
+       unsigned long flags;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading) {
+               spin_unlock_irqrestore(&drvdata->spinlock, flags);
+               return;
+       }
+
+       val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
+       /* Disable the TMC only if it was enabled */
+       if (val != CS_MODE_DISABLED)
+               tmc_etr_disable_hw(drvdata);
+
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "TMC-ETR disabled\n");
+}
+
+static const struct coresight_ops_sink tmc_etr_sink_ops = {
+       .enable         = tmc_enable_etr_sink,
+       .disable        = tmc_disable_etr_sink,
+};
+
+const struct coresight_ops tmc_etr_cs_ops = {
+       .sink_ops       = &tmc_etr_sink_ops,
+};
+
+int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
+{
+       int ret = 0;
+       long val;
+       unsigned long flags;
+
+       /* config types are set at boot time and never change */
+       if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
+               return -EINVAL;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       val = local_read(&drvdata->mode);
+       /* Don't interfere if operated from Perf */
+       if (val == CS_MODE_PERF) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* If drvdata::buf is NULL the trace data has been read already */
+       if (drvdata->buf == NULL) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Disable the TMC if need be */
+       if (val == CS_MODE_SYSFS)
+               tmc_etr_disable_hw(drvdata);
+
+       drvdata->reading = true;
+out:
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       return ret;
+}
+
+int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
+{
+       unsigned long flags;
+       dma_addr_t paddr;
+       void __iomem *vaddr = NULL;
+
+       /* config types are set at boot time and never change */
+       if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
+               return -EINVAL;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       /* Re-enable the TMC if need be */
+       if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+               /*
+                * The trace run will continue with the same allocated trace
+                * buffer. As such zero-out the buffer so that we don't end
+                * up with stale data.
+                *
+                * Since the tracer is still enabled drvdata::buf
+                * can't be NULL.
+                */
+               memset(drvdata->buf, 0, drvdata->size);
+               tmc_etr_enable_hw(drvdata);
+       } else {
+               /*
+                * The ETR is not tracing and the buffer was just read.
+                * As such prepare to free the trace buffer.
+                */
+               vaddr = drvdata->vaddr;
+               paddr = drvdata->paddr;
+               drvdata->buf = NULL;
+       }
+
+       drvdata->reading = false;
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       /* Free allocated memory outside of the spinlock */
+       if (vaddr)
+               dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+
+       return 0;
+}
index a57c7ec1661f915f9d7bc680cefc3558129d299a..9e02ac963cd0e3fbb38f9ee5e3fd082719f38bf7 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Trace Memory Controller driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/amba/bus.h>
 
 #include "coresight-priv.h"
+#include "coresight-tmc.h"
 
-#define TMC_RSZ                        0x004
-#define TMC_STS                        0x00c
-#define TMC_RRD                        0x010
-#define TMC_RRP                        0x014
-#define TMC_RWP                        0x018
-#define TMC_TRG                        0x01c
-#define TMC_CTL                        0x020
-#define TMC_RWD                        0x024
-#define TMC_MODE               0x028
-#define TMC_LBUFLEVEL          0x02c
-#define TMC_CBUFLEVEL          0x030
-#define TMC_BUFWM              0x034
-#define TMC_RRPHI              0x038
-#define TMC_RWPHI              0x03c
-#define TMC_AXICTL             0x110
-#define TMC_DBALO              0x118
-#define TMC_DBAHI              0x11c
-#define TMC_FFSR               0x300
-#define TMC_FFCR               0x304
-#define TMC_PSCR               0x308
-#define TMC_ITMISCOP0          0xee0
-#define TMC_ITTRFLIN           0xee8
-#define TMC_ITATBDATA0         0xeec
-#define TMC_ITATBCTR2          0xef0
-#define TMC_ITATBCTR1          0xef4
-#define TMC_ITATBCTR0          0xef8
-
-/* register description */
-/* TMC_CTL - 0x020 */
-#define TMC_CTL_CAPT_EN                BIT(0)
-/* TMC_STS - 0x00C */
-#define TMC_STS_TRIGGERED      BIT(1)
-/* TMC_AXICTL - 0x110 */
-#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
-#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
-#define TMC_AXICTL_SCT_GAT_MODE        BIT(7)
-#define TMC_AXICTL_WR_BURST_LEN 0xF00
-/* TMC_FFCR - 0x304 */
-#define TMC_FFCR_EN_FMT                BIT(0)
-#define TMC_FFCR_EN_TI         BIT(1)
-#define TMC_FFCR_FON_FLIN      BIT(4)
-#define TMC_FFCR_FON_TRIG_EVT  BIT(5)
-#define TMC_FFCR_FLUSHMAN      BIT(6)
-#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
-#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
-
-#define TMC_STS_TRIGGERED_BIT  2
-#define TMC_FFCR_FLUSHMAN_BIT  6
-
-enum tmc_config_type {
-       TMC_CONFIG_TYPE_ETB,
-       TMC_CONFIG_TYPE_ETR,
-       TMC_CONFIG_TYPE_ETF,
-};
-
-enum tmc_mode {
-       TMC_MODE_CIRCULAR_BUFFER,
-       TMC_MODE_SOFTWARE_FIFO,
-       TMC_MODE_HARDWARE_FIFO,
-};
-
-enum tmc_mem_intf_width {
-       TMC_MEM_INTF_WIDTH_32BITS       = 0x2,
-       TMC_MEM_INTF_WIDTH_64BITS       = 0x3,
-       TMC_MEM_INTF_WIDTH_128BITS      = 0x4,
-       TMC_MEM_INTF_WIDTH_256BITS      = 0x5,
-};
-
-/**
- * struct tmc_drvdata - specifics associated to an TMC component
- * @base:      memory mapped base address for this component.
- * @dev:       the device entity associated to this component.
- * @csdev:     component vitals needed by the framework.
- * @miscdev:   specifics to handle "/dev/xyz.tmc" entry.
- * @spinlock:  only one at a time pls.
- * @read_count:        manages preparation of buffer for reading.
- * @buf:       area of memory where trace data get sent.
- * @paddr:     DMA start location in RAM.
- * @vaddr:     virtual representation of @paddr.
- * @size:      @buf size.
- * @enable:    this TMC is being used.
- * @config_type: TMC variant, must be of type @tmc_config_type.
- * @trigger_cntr: amount of words to store after a trigger.
- */
-struct tmc_drvdata {
-       void __iomem            *base;
-       struct device           *dev;
-       struct coresight_device *csdev;
-       struct miscdevice       miscdev;
-       spinlock_t              spinlock;
-       int                     read_count;
-       bool                    reading;
-       char                    *buf;
-       dma_addr_t              paddr;
-       void __iomem            *vaddr;
-       u32                     size;
-       bool                    enable;
-       enum tmc_config_type    config_type;
-       u32                     trigger_cntr;
-};
-
-static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
+void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
 {
        /* Ensure formatter, unformatter and hardware fifo are empty */
        if (coresight_timeout(drvdata->base,
-                             TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
+                             TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
                dev_err(drvdata->dev,
                        "timeout observed when probing at offset %#x\n",
                        TMC_STS);
        }
 }
 
-static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
+void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
 {
        u32 ffcr;
 
        ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
        ffcr |= TMC_FFCR_STOP_ON_FLUSH;
        writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
-       ffcr |= TMC_FFCR_FLUSHMAN;
+       ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
        writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
        /* Ensure flush completes */
        if (coresight_timeout(drvdata->base,
@@ -159,343 +60,73 @@ static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
                        TMC_FFCR);
        }
 
-       tmc_wait_for_ready(drvdata);
+       tmc_wait_for_tmcready(drvdata);
 }
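/*
 * A simplified, illustrative sketch of what coresight_timeout() does:
 * poll one bit of a register until it reaches the expected value or a
 * deadline expires.  The 100us budget is an assumption made for the
 * sketch; the real helper lives in coresight.c.
 */
static int coresight_timeout_sketch(void __iomem *addr, u32 offset,
				    int position, int value)
{
	int i;

	for (i = 100; i > 0; i--) {
		u32 val = readl_relaxed(addr + offset);

		/* Stop as soon as the bit matches what the caller wants */
		if (!!(val & BIT(position)) == !!value)
			return 0;
		udelay(1);
	}

	return -EAGAIN;
}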
 
-static void tmc_enable_hw(struct tmc_drvdata *drvdata)
+void tmc_enable_hw(struct tmc_drvdata *drvdata)
 {
        writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 }
 
-static void tmc_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_disable_hw(struct tmc_drvdata *drvdata)
 {
        writel_relaxed(0x0, drvdata->base + TMC_CTL);
 }
 
-static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
-{
-       /* Zero out the memory to help with debug */
-       memset(drvdata->buf, 0, drvdata->size);
-
-       CS_UNLOCK(drvdata->base);
-
-       writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
-       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
-                      TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
-                      TMC_FFCR_TRIGON_TRIGIN,
-                      drvdata->base + TMC_FFCR);
-
-       writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
-       tmc_enable_hw(drvdata);
-
-       CS_LOCK(drvdata->base);
-}
-
-static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
-{
-       u32 axictl;
-
-       /* Zero out the memory to help with debug */
-       memset(drvdata->vaddr, 0, drvdata->size);
-
-       CS_UNLOCK(drvdata->base);
-
-       writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
-       writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
-
-       axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
-       axictl |= TMC_AXICTL_WR_BURST_LEN;
-       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
-       axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
-       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
-       axictl = (axictl &
-                 ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
-                 TMC_AXICTL_PROT_CTL_B1;
-       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
-
-       writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
-       writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
-       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
-                      TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
-                      TMC_FFCR_TRIGON_TRIGIN,
-                      drvdata->base + TMC_FFCR);
-       writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
-       tmc_enable_hw(drvdata);
-
-       CS_LOCK(drvdata->base);
-}
-
-static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
-{
-       CS_UNLOCK(drvdata->base);
-
-       writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
-       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
-                      drvdata->base + TMC_FFCR);
-       writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
-       tmc_enable_hw(drvdata);
-
-       CS_LOCK(drvdata->base);
-}
-
-static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
-{
-       unsigned long flags;
-
-       pm_runtime_get_sync(drvdata->dev);
-
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       if (drvdata->reading) {
-               spin_unlock_irqrestore(&drvdata->spinlock, flags);
-               pm_runtime_put(drvdata->dev);
-               return -EBUSY;
-       }
-
-       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
-               tmc_etb_enable_hw(drvdata);
-       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
-               tmc_etr_enable_hw(drvdata);
-       } else {
-               if (mode == TMC_MODE_CIRCULAR_BUFFER)
-                       tmc_etb_enable_hw(drvdata);
-               else
-                       tmc_etf_enable_hw(drvdata);
-       }
-       drvdata->enable = true;
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
-       dev_info(drvdata->dev, "TMC enabled\n");
-       return 0;
-}
-
-static int tmc_enable_sink(struct coresight_device *csdev)
-{
-       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
-       return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
-}
-
-static int tmc_enable_link(struct coresight_device *csdev, int inport,
-                          int outport)
+static int tmc_read_prepare(struct tmc_drvdata *drvdata)
 {
-       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
-       return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
-}
+       int ret = 0;
 
-static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
-{
-       enum tmc_mem_intf_width memwidth;
-       u8 memwords;
-       char *bufp;
-       u32 read_data;
-       int i;
-
-       memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
-       if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
-               memwords = 1;
-       else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
-               memwords = 2;
-       else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
-               memwords = 4;
-       else
-               memwords = 8;
-
-       bufp = drvdata->buf;
-       while (1) {
-               for (i = 0; i < memwords; i++) {
-                       read_data = readl_relaxed(drvdata->base + TMC_RRD);
-                       if (read_data == 0xFFFFFFFF)
-                               return;
-                       memcpy(bufp, &read_data, 4);
-                       bufp += 4;
-               }
+       switch (drvdata->config_type) {
+       case TMC_CONFIG_TYPE_ETB:
+       case TMC_CONFIG_TYPE_ETF:
+               ret = tmc_read_prepare_etb(drvdata);
+               break;
+       case TMC_CONFIG_TYPE_ETR:
+               ret = tmc_read_prepare_etr(drvdata);
+               break;
+       default:
+               ret = -EINVAL;
        }
-}
-
-static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
-{
-       CS_UNLOCK(drvdata->base);
 
-       tmc_flush_and_stop(drvdata);
-       tmc_etb_dump_hw(drvdata);
-       tmc_disable_hw(drvdata);
-
-       CS_LOCK(drvdata->base);
-}
+       if (!ret)
+               dev_info(drvdata->dev, "TMC read start\n");
 
-static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
-{
-       u32 rwp, val;
-
-       rwp = readl_relaxed(drvdata->base + TMC_RWP);
-       val = readl_relaxed(drvdata->base + TMC_STS);
-
-       /* How much memory do we still have */
-       if (val & BIT(0))
-               drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
-       else
-               drvdata->buf = drvdata->vaddr;
-}
-
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
-{
-       CS_UNLOCK(drvdata->base);
-
-       tmc_flush_and_stop(drvdata);
-       tmc_etr_dump_hw(drvdata);
-       tmc_disable_hw(drvdata);
-
-       CS_LOCK(drvdata->base);
-}
-
-static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
-{
-       CS_UNLOCK(drvdata->base);
-
-       tmc_flush_and_stop(drvdata);
-       tmc_disable_hw(drvdata);
-
-       CS_LOCK(drvdata->base);
+       return ret;
 }
 
-static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
+static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       if (drvdata->reading)
-               goto out;
+       int ret = 0;
 
-       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
-               tmc_etb_disable_hw(drvdata);
-       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
-               tmc_etr_disable_hw(drvdata);
-       } else {
-               if (mode == TMC_MODE_CIRCULAR_BUFFER)
-                       tmc_etb_disable_hw(drvdata);
-               else
-                       tmc_etf_disable_hw(drvdata);
+       switch (drvdata->config_type) {
+       case TMC_CONFIG_TYPE_ETB:
+       case TMC_CONFIG_TYPE_ETF:
+               ret = tmc_read_unprepare_etb(drvdata);
+               break;
+       case TMC_CONFIG_TYPE_ETR:
+               ret = tmc_read_unprepare_etr(drvdata);
+               break;
+       default:
+               ret = -EINVAL;
        }
-out:
-       drvdata->enable = false;
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
-       pm_runtime_put(drvdata->dev);
 
-       dev_info(drvdata->dev, "TMC disabled\n");
-}
+       if (!ret)
+               dev_info(drvdata->dev, "TMC read end\n");
 
-static void tmc_disable_sink(struct coresight_device *csdev)
-{
-       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
-       tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
-}
-
-static void tmc_disable_link(struct coresight_device *csdev, int inport,
-                            int outport)
-{
-       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
-       tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
-}
-
-static const struct coresight_ops_sink tmc_sink_ops = {
-       .enable         = tmc_enable_sink,
-       .disable        = tmc_disable_sink,
-};
-
-static const struct coresight_ops_link tmc_link_ops = {
-       .enable         = tmc_enable_link,
-       .disable        = tmc_disable_link,
-};
-
-static const struct coresight_ops tmc_etb_cs_ops = {
-       .sink_ops       = &tmc_sink_ops,
-};
-
-static const struct coresight_ops tmc_etr_cs_ops = {
-       .sink_ops       = &tmc_sink_ops,
-};
-
-static const struct coresight_ops tmc_etf_cs_ops = {
-       .sink_ops       = &tmc_sink_ops,
-       .link_ops       = &tmc_link_ops,
-};
-
-static int tmc_read_prepare(struct tmc_drvdata *drvdata)
-{
-       int ret;
-       unsigned long flags;
-       enum tmc_mode mode;
-
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       if (!drvdata->enable)
-               goto out;
-
-       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
-               tmc_etb_disable_hw(drvdata);
-       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
-               tmc_etr_disable_hw(drvdata);
-       } else {
-               mode = readl_relaxed(drvdata->base + TMC_MODE);
-               if (mode == TMC_MODE_CIRCULAR_BUFFER) {
-                       tmc_etb_disable_hw(drvdata);
-               } else {
-                       ret = -ENODEV;
-                       goto err;
-               }
-       }
-out:
-       drvdata->reading = true;
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
-       dev_info(drvdata->dev, "TMC read start\n");
-       return 0;
-err:
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return ret;
 }
 
-static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
-{
-       unsigned long flags;
-       enum tmc_mode mode;
-
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       if (!drvdata->enable)
-               goto out;
-
-       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
-               tmc_etb_enable_hw(drvdata);
-       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
-               tmc_etr_enable_hw(drvdata);
-       } else {
-               mode = readl_relaxed(drvdata->base + TMC_MODE);
-               if (mode == TMC_MODE_CIRCULAR_BUFFER)
-                       tmc_etb_enable_hw(drvdata);
-       }
-out:
-       drvdata->reading = false;
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
-       dev_info(drvdata->dev, "TMC read end\n");
-}
-
 static int tmc_open(struct inode *inode, struct file *file)
 {
+       int ret;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata, miscdev);
-       int ret = 0;
-
-       if (drvdata->read_count++)
-               goto out;
 
        ret = tmc_read_prepare(drvdata);
        if (ret)
                return ret;
-out:
+
        nonseekable_open(inode, file);
 
        dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -535,19 +166,14 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
 
 static int tmc_release(struct inode *inode, struct file *file)
 {
+       int ret;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata, miscdev);
 
-       if (--drvdata->read_count) {
-               if (drvdata->read_count < 0) {
-                       dev_err(drvdata->dev, "mismatched close\n");
-                       drvdata->read_count = 0;
-               }
-               goto out;
-       }
+       ret = tmc_read_unprepare(drvdata);
+       if (ret)
+               return ret;
 
-       tmc_read_unprepare(drvdata);
-out:
        dev_dbg(drvdata->dev, "%s: released\n", __func__);
        return 0;
 }
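/*
 * A hedged userspace sketch of draining trace data through the misc
 * device the handlers above implement.  The device node name below is
 * hypothetical; the actual name comes from the platform's device tree.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/20010000.etf", O_RDONLY); /* hypothetical node */

	if (fd < 0)
		return 1;

	/* open() ran tmc_read_prepare(); read() now returns trace data */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd); /* triggers tmc_read_unprepare() */
	return 0;
}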
@@ -560,56 +186,71 @@ static const struct file_operations tmc_fops = {
        .llseek         = no_llseek,
 };
 
-static ssize_t status_show(struct device *dev,
-                          struct device_attribute *attr, char *buf)
+static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
 {
-       unsigned long flags;
-       u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
-       u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
-       u32 devid;
-       struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       enum tmc_mem_intf_width memwidth;
 
-       pm_runtime_get_sync(drvdata->dev);
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       CS_UNLOCK(drvdata->base);
-
-       tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
-       tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
-       tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
-       tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
-       tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
-       tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
-       tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
-       tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
-       tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
-       tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
-       devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+       /*
+        * Excerpt from the TRM:
+        *
+        * DEVID::MEMWIDTH[10:8]
+        * 0x2 Memory interface databus is 32 bits wide.
+        * 0x3 Memory interface databus is 64 bits wide.
+        * 0x4 Memory interface databus is 128 bits wide.
+        * 0x5 Memory interface databus is 256 bits wide.
+        */
+       switch (BMVAL(devid, 8, 10)) {
+       case 0x2:
+               memwidth = TMC_MEM_INTF_WIDTH_32BITS;
+               break;
+       case 0x3:
+               memwidth = TMC_MEM_INTF_WIDTH_64BITS;
+               break;
+       case 0x4:
+               memwidth = TMC_MEM_INTF_WIDTH_128BITS;
+               break;
+       case 0x5:
+               memwidth = TMC_MEM_INTF_WIDTH_256BITS;
+               break;
+       default:
+               memwidth = 0;
+       }
 
-       CS_LOCK(drvdata->base);
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(drvdata->dev);
-
-       return sprintf(buf,
-                      "Depth:\t\t0x%x\n"
-                      "Status:\t\t0x%x\n"
-                      "RAM read ptr:\t0x%x\n"
-                      "RAM wrt ptr:\t0x%x\n"
-                      "Trigger cnt:\t0x%x\n"
-                      "Control:\t0x%x\n"
-                      "Flush status:\t0x%x\n"
-                      "Flush ctrl:\t0x%x\n"
-                      "Mode:\t\t0x%x\n"
-                      "PSRC:\t\t0x%x\n"
-                      "DEVID:\t\t0x%x\n",
-                       tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
-                       tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
-
-       return -EINVAL;
+       return memwidth;
 }
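/*
 * For reference: BMVAL(val, lsb, msb) is a shift-and-mask field
 * extractor, so the switch above is driven by three DEVID bits.  A
 * standalone equivalent, given a drvdata in scope:
 */
u32 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
u32 width_code = (devid >> 8) & 0x7;	/* same as BMVAL(devid, 8, 10) */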
-static DEVICE_ATTR_RO(status);
 
-static ssize_t trigger_cntr_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+#define coresight_tmc_simple_func(name, offset)                        \
+       coresight_simple_func(struct tmc_drvdata, name, offset)
+
+coresight_tmc_simple_func(rsz, TMC_RSZ);
+coresight_tmc_simple_func(sts, TMC_STS);
+coresight_tmc_simple_func(rrp, TMC_RRP);
+coresight_tmc_simple_func(rwp, TMC_RWP);
+coresight_tmc_simple_func(trg, TMC_TRG);
+coresight_tmc_simple_func(ctl, TMC_CTL);
+coresight_tmc_simple_func(ffsr, TMC_FFSR);
+coresight_tmc_simple_func(ffcr, TMC_FFCR);
+coresight_tmc_simple_func(mode, TMC_MODE);
+coresight_tmc_simple_func(pscr, TMC_PSCR);
+coresight_tmc_simple_func(devid, CORESIGHT_DEVID);
+
+static struct attribute *coresight_tmc_mgmt_attrs[] = {
+       &dev_attr_rsz.attr,
+       &dev_attr_sts.attr,
+       &dev_attr_rrp.attr,
+       &dev_attr_rwp.attr,
+       &dev_attr_trg.attr,
+       &dev_attr_ctl.attr,
+       &dev_attr_ffsr.attr,
+       &dev_attr_ffcr.attr,
+       &dev_attr_mode.attr,
+       &dev_attr_pscr.attr,
+       &dev_attr_devid.attr,
+       NULL,
+};
+
+ssize_t trigger_cntr_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
 {
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
        unsigned long val = drvdata->trigger_cntr;
@@ -634,26 +275,25 @@ static ssize_t trigger_cntr_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(trigger_cntr);
 
-static struct attribute *coresight_etb_attrs[] = {
+static struct attribute *coresight_tmc_attrs[] = {
        &dev_attr_trigger_cntr.attr,
-       &dev_attr_status.attr,
        NULL,
 };
-ATTRIBUTE_GROUPS(coresight_etb);
 
-static struct attribute *coresight_etr_attrs[] = {
-       &dev_attr_trigger_cntr.attr,
-       &dev_attr_status.attr,
-       NULL,
+static const struct attribute_group coresight_tmc_group = {
+       .attrs = coresight_tmc_attrs,
 };
-ATTRIBUTE_GROUPS(coresight_etr);
 
-static struct attribute *coresight_etf_attrs[] = {
-       &dev_attr_trigger_cntr.attr,
-       &dev_attr_status.attr,
+static const struct attribute_group coresight_tmc_mgmt_group = {
+       .attrs = coresight_tmc_mgmt_attrs,
+       .name = "mgmt",
+};
+
+const struct attribute_group *coresight_tmc_groups[] = {
+       &coresight_tmc_group,
+       &coresight_tmc_mgmt_group,
        NULL,
 };
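/*
 * Once the "mgmt" group is registered, each register above is exposed
 * as a read-only sysfs file.  A hedged userspace sketch; the device
 * path is hypothetical and platform dependent:
 */
char val[32] = "";
int fd = open("/sys/bus/coresight/devices/20010000.etf/mgmt/rsz", O_RDONLY);

if (fd >= 0 && read(fd, val, sizeof(val) - 1) > 0)
	printf("RSZ: %s", val);	/* value produced by the rsz accessor above */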
-ATTRIBUTE_GROUPS(coresight_etf);
 
 static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
 {
@@ -692,6 +332,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
 
        devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
        drvdata->config_type = BMVAL(devid, 6, 7);
+       drvdata->memwidth = tmc_get_memwidth(devid);
 
        if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
                if (np)
@@ -706,20 +347,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
 
        pm_runtime_put(&adev->dev);
 
-       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
-               drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
-                                               &drvdata->paddr, GFP_KERNEL);
-               if (!drvdata->vaddr)
-                       return -ENOMEM;
-
-               memset(drvdata->vaddr, 0, drvdata->size);
-               drvdata->buf = drvdata->vaddr;
-       } else {
-               drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
-               if (!drvdata->buf)
-                       return -ENOMEM;
-       }
-
        desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
        if (!desc) {
                ret = -ENOMEM;
@@ -729,20 +356,18 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
        desc->pdata = pdata;
        desc->dev = dev;
        desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+       desc->groups = coresight_tmc_groups;
 
        if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
                desc->type = CORESIGHT_DEV_TYPE_SINK;
                desc->ops = &tmc_etb_cs_ops;
-               desc->groups = coresight_etb_groups;
        } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
                desc->type = CORESIGHT_DEV_TYPE_SINK;
                desc->ops = &tmc_etr_cs_ops;
-               desc->groups = coresight_etr_groups;
        } else {
                desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
                desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
                desc->ops = &tmc_etf_cs_ops;
-               desc->groups = coresight_etf_groups;
        }
 
        drvdata->csdev = coresight_register(desc);
@@ -758,7 +383,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
        if (ret)
                goto err_misc_register;
 
-       dev_info(dev, "TMC initialized\n");
        return 0;
 
 err_misc_register:
@@ -766,23 +390,10 @@ err_misc_register:
 err_devm_kzalloc:
        if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
                dma_free_coherent(dev, drvdata->size,
-                               &drvdata->paddr, GFP_KERNEL);
+                               drvdata->vaddr, drvdata->paddr);
        return ret;
 }
 
-static int tmc_remove(struct amba_device *adev)
-{
-       struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
-
-       misc_deregister(&drvdata->miscdev);
-       coresight_unregister(drvdata->csdev);
-       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
-               dma_free_coherent(drvdata->dev, drvdata->size,
-                                 &drvdata->paddr, GFP_KERNEL);
-
-       return 0;
-}
-
 static struct amba_id tmc_ids[] = {
        {
                .id     = 0x0003b961,
@@ -795,13 +406,9 @@ static struct amba_driver tmc_driver = {
        .drv = {
                .name   = "coresight-tmc",
                .owner  = THIS_MODULE,
+               .suppress_bind_attrs = true,
        },
        .probe          = tmc_probe,
-       .remove         = tmc_remove,
        .id_table       = tmc_ids,
 };
-
-module_amba_driver(tmc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
+builtin_amba_driver(tmc_driver);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
new file mode 100644 (file)
index 0000000..5c5fe2a
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CORESIGHT_TMC_H
+#define _CORESIGHT_TMC_H
+
+#include <linux/miscdevice.h>
+
+#define TMC_RSZ                        0x004
+#define TMC_STS                        0x00c
+#define TMC_RRD                        0x010
+#define TMC_RRP                        0x014
+#define TMC_RWP                        0x018
+#define TMC_TRG                        0x01c
+#define TMC_CTL                        0x020
+#define TMC_RWD                        0x024
+#define TMC_MODE               0x028
+#define TMC_LBUFLEVEL          0x02c
+#define TMC_CBUFLEVEL          0x030
+#define TMC_BUFWM              0x034
+#define TMC_RRPHI              0x038
+#define TMC_RWPHI              0x03c
+#define TMC_AXICTL             0x110
+#define TMC_DBALO              0x118
+#define TMC_DBAHI              0x11c
+#define TMC_FFSR               0x300
+#define TMC_FFCR               0x304
+#define TMC_PSCR               0x308
+#define TMC_ITMISCOP0          0xee0
+#define TMC_ITTRFLIN           0xee8
+#define TMC_ITATBDATA0         0xeec
+#define TMC_ITATBCTR2          0xef0
+#define TMC_ITATBCTR1          0xef4
+#define TMC_ITATBCTR0          0xef8
+
+/* register description */
+/* TMC_CTL - 0x020 */
+#define TMC_CTL_CAPT_EN                BIT(0)
+/* TMC_STS - 0x00C */
+#define TMC_STS_TMCREADY_BIT   2
+#define TMC_STS_FULL           BIT(0)
+#define TMC_STS_TRIGGERED      BIT(1)
+/* TMC_AXICTL - 0x110 */
+#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
+#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
+#define TMC_AXICTL_SCT_GAT_MODE        BIT(7)
+#define TMC_AXICTL_WR_BURST_16 0xF00
+/* TMC_FFCR - 0x304 */
+#define TMC_FFCR_FLUSHMAN_BIT  6
+#define TMC_FFCR_EN_FMT                BIT(0)
+#define TMC_FFCR_EN_TI         BIT(1)
+#define TMC_FFCR_FON_FLIN      BIT(4)
+#define TMC_FFCR_FON_TRIG_EVT  BIT(5)
+#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
+#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
+
+enum tmc_config_type {
+       TMC_CONFIG_TYPE_ETB,
+       TMC_CONFIG_TYPE_ETR,
+       TMC_CONFIG_TYPE_ETF,
+};
+
+enum tmc_mode {
+       TMC_MODE_CIRCULAR_BUFFER,
+       TMC_MODE_SOFTWARE_FIFO,
+       TMC_MODE_HARDWARE_FIFO,
+};
+
+enum tmc_mem_intf_width {
+       TMC_MEM_INTF_WIDTH_32BITS       = 1,
+       TMC_MEM_INTF_WIDTH_64BITS       = 2,
+       TMC_MEM_INTF_WIDTH_128BITS      = 4,
+       TMC_MEM_INTF_WIDTH_256BITS      = 8,
+};
+
+/**
+ * struct tmc_drvdata - specifics associated to a TMC component
+ * @base:      memory mapped base address for this component.
+ * @dev:       the device entity associated to this component.
+ * @csdev:     component vitals needed by the framework.
+ * @miscdev:   specifics to handle "/dev/xyz.tmc" entry.
+ * @spinlock:  only one at a time pls.
+ * @buf:       area of memory where trace data get sent.
+ * @paddr:     DMA start location in RAM.
+ * @vaddr:     virtual representation of @paddr.
+ * @size:      @buf size.
+ * @mode:      how this TMC is being used.
+ * @config_type: TMC variant, must be of type @tmc_config_type.
+ * @memwidth:  width of the memory interface databus, in 32-bit words.
+ * @trigger_cntr: number of words to store after a trigger.
+ */
+struct tmc_drvdata {
+       void __iomem            *base;
+       struct device           *dev;
+       struct coresight_device *csdev;
+       struct miscdevice       miscdev;
+       spinlock_t              spinlock;
+       bool                    reading;
+       char                    *buf;
+       dma_addr_t              paddr;
+       void __iomem            *vaddr;
+       u32                     size;
+       local_t                 mode;
+       enum tmc_config_type    config_type;
+       enum tmc_mem_intf_width memwidth;
+       u32                     trigger_cntr;
+};
+
+/* Generic functions */
+void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
+void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
+void tmc_enable_hw(struct tmc_drvdata *drvdata);
+void tmc_disable_hw(struct tmc_drvdata *drvdata);
+
+/* ETB/ETF functions */
+int tmc_read_prepare_etb(struct tmc_drvdata *drvdata);
+int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata);
+extern const struct coresight_ops tmc_etb_cs_ops;
+extern const struct coresight_ops tmc_etf_cs_ops;
+
+/* ETR functions */
+int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
+int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
+extern const struct coresight_ops tmc_etr_cs_ops;
+#endif
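/*
 * A minimal sketch of how @memwidth (in 32-bit words) drives an ETB
 * drain loop, mirroring the former tmc_etb_dump_hw() removed above:
 * one TMC_RRD read per word of memory-interface width, until the
 * empty-FIFO marker is seen.
 */
char *bufp = drvdata->buf;
int i;

for (i = 0; i < drvdata->memwidth; i++) {
	u32 data = readl_relaxed(drvdata->base + TMC_RRD);

	if (data == 0xFFFFFFFF)	/* FIFO empty */
		break;
	memcpy(bufp, &data, 4);
	bufp += 4;
}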
index 7214efd10db52f9c2273ea5e0f86193034c8bb18..4e471e2e9d896df24399c9e02cdd06a463ac27be 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Trace Port Interface Unit driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/io.h>
@@ -70,11 +71,10 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
        CS_LOCK(drvdata->base);
 }
 
-static int tpiu_enable(struct coresight_device *csdev)
+static int tpiu_enable(struct coresight_device *csdev, u32 mode)
 {
        struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(csdev->dev.parent);
        tpiu_enable_hw(drvdata);
 
        dev_info(drvdata->dev, "TPIU enabled\n");
@@ -98,7 +98,6 @@ static void tpiu_disable(struct coresight_device *csdev)
        struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
        tpiu_disable_hw(drvdata);
-       pm_runtime_put(csdev->dev.parent);
 
        dev_info(drvdata->dev, "TPIU disabled\n");
 }
@@ -168,15 +167,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
        if (IS_ERR(drvdata->csdev))
                return PTR_ERR(drvdata->csdev);
 
-       dev_info(dev, "TPIU initialized\n");
-       return 0;
-}
-
-static int tpiu_remove(struct amba_device *adev)
-{
-       struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
-
-       coresight_unregister(drvdata->csdev);
        return 0;
 }
 
@@ -223,13 +213,9 @@ static struct amba_driver tpiu_driver = {
                .name   = "coresight-tpiu",
                .owner  = THIS_MODULE,
                .pm     = &tpiu_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
        .probe          = tpiu_probe,
-       .remove         = tpiu_remove,
        .id_table       = tpiu_ids,
 };
-
-module_amba_driver(tpiu_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
+builtin_amba_driver(tpiu_driver);
index 93738dfbf6313ea09f9f970ce463ec8374b4f661..5443d03a1eec4b09feedd4be794d7ef5d2c2dca4 100644 (file)
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/coresight.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>
 
 #include "coresight-priv.h"
 
 static DEFINE_MUTEX(coresight_mutex);
 
+/**
+ * struct coresight_node - elements of a path, from source to sink
+ * @csdev:     Address of an element.
+ * @link:      hook to the list.
+ */
+struct coresight_node {
+       struct coresight_device *csdev;
+       struct list_head link;
+};
+
+/*
+ * When operating Coresight drivers from the sysFS interface, only a single
+ * path can exist from a tracer (associated to a CPU) to a sink.
+ */
+static DEFINE_PER_CPU(struct list_head *, tracer_path);
+
+/*
+ * As of this writing only a single STM can be found in CS topologies.  Since
+ * there is no way to know if we'll ever see more and what kind of
+ * configuration they will enact, for the time being only define a single path
+ * for STM.
+ */
+static struct list_head *stm_path;
+
 static int coresight_id_match(struct device *dev, void *data)
 {
        int trace_id, i_trace_id;
@@ -68,15 +92,12 @@ static int coresight_source_is_unique(struct coresight_device *csdev)
                                 csdev, coresight_id_match);
 }
 
-static int coresight_find_link_inport(struct coresight_device *csdev)
+static int coresight_find_link_inport(struct coresight_device *csdev,
+                                     struct coresight_device *parent)
 {
        int i;
-       struct coresight_device *parent;
        struct coresight_connection *conn;
 
-       parent = container_of(csdev->path_link.next,
-                             struct coresight_device, path_link);
-
        for (i = 0; i < parent->nr_outport; i++) {
                conn = &parent->conns[i];
                if (conn->child_dev == csdev)
@@ -89,15 +110,12 @@ static int coresight_find_link_inport(struct coresight_device *csdev)
        return 0;
 }
 
-static int coresight_find_link_outport(struct coresight_device *csdev)
+static int coresight_find_link_outport(struct coresight_device *csdev,
+                                      struct coresight_device *child)
 {
        int i;
-       struct coresight_device *child;
        struct coresight_connection *conn;
 
-       child = container_of(csdev->path_link.prev,
-                            struct coresight_device, path_link);
-
        for (i = 0; i < csdev->nr_outport; i++) {
                conn = &csdev->conns[i];
                if (conn->child_dev == child)
@@ -110,13 +128,13 @@ static int coresight_find_link_outport(struct coresight_device *csdev)
        return 0;
 }
 
-static int coresight_enable_sink(struct coresight_device *csdev)
+static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
 {
        int ret;
 
        if (!csdev->enable) {
                if (sink_ops(csdev)->enable) {
-                       ret = sink_ops(csdev)->enable(csdev);
+                       ret = sink_ops(csdev)->enable(csdev, mode);
                        if (ret)
                                return ret;
                }
@@ -138,14 +156,19 @@ static void coresight_disable_sink(struct coresight_device *csdev)
        }
 }
 
-static int coresight_enable_link(struct coresight_device *csdev)
+static int coresight_enable_link(struct coresight_device *csdev,
+                                struct coresight_device *parent,
+                                struct coresight_device *child)
 {
        int ret;
        int link_subtype;
        int refport, inport, outport;
 
-       inport = coresight_find_link_inport(csdev);
-       outport = coresight_find_link_outport(csdev);
+       if (!parent || !child)
+               return -EINVAL;
+
+       inport = coresight_find_link_inport(csdev, parent);
+       outport = coresight_find_link_outport(csdev, child);
        link_subtype = csdev->subtype.link_subtype;
 
        if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
@@ -168,14 +191,19 @@ static int coresight_enable_link(struct coresight_device *csdev)
        return 0;
 }
 
-static void coresight_disable_link(struct coresight_device *csdev)
+static void coresight_disable_link(struct coresight_device *csdev,
+                                  struct coresight_device *parent,
+                                  struct coresight_device *child)
 {
        int i, nr_conns;
        int link_subtype;
        int refport, inport, outport;
 
-       inport = coresight_find_link_inport(csdev);
-       outport = coresight_find_link_outport(csdev);
+       if (!parent || !child)
+               return;
+
+       inport = coresight_find_link_inport(csdev, parent);
+       outport = coresight_find_link_outport(csdev, child);
        link_subtype = csdev->subtype.link_subtype;
 
        if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
@@ -201,7 +229,7 @@ static void coresight_disable_link(struct coresight_device *csdev)
        csdev->enable = false;
 }
 
-static int coresight_enable_source(struct coresight_device *csdev)
+static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
 {
        int ret;
 
@@ -213,7 +241,7 @@ static int coresight_enable_source(struct coresight_device *csdev)
 
        if (!csdev->enable) {
                if (source_ops(csdev)->enable) {
-                       ret = source_ops(csdev)->enable(csdev);
+                       ret = source_ops(csdev)->enable(csdev, NULL, mode);
                        if (ret)
                                return ret;
                }
@@ -235,147 +263,328 @@ static void coresight_disable_source(struct coresight_device *csdev)
        }
 }
 
-static int coresight_enable_path(struct list_head *path)
+void coresight_disable_path(struct list_head *path)
 {
-       int ret = 0;
-       struct coresight_device *cd;
+       u32 type;
+       struct coresight_node *nd;
+       struct coresight_device *csdev, *parent, *child;
 
-       /*
-        * At this point we have a full @path, from source to sink.  The
-        * sink is the first entry and the source the last one.  Go through
-        * all the components and enable them one by one.
-        */
-       list_for_each_entry(cd, path, path_link) {
-               if (cd == list_first_entry(path, struct coresight_device,
-                                          path_link)) {
-                       ret = coresight_enable_sink(cd);
-               } else if (list_is_last(&cd->path_link, path)) {
-                       /*
-                        * Don't enable the source just yet - this needs to
-                        * happen at the very end when all links and sink
-                        * along the path have been configured properly.
-                        */
-                       ;
-               } else {
-                       ret = coresight_enable_link(cd);
+       list_for_each_entry(nd, path, link) {
+               csdev = nd->csdev;
+               type = csdev->type;
+
+               /*
+                * ETF devices are tricky... They can be a link or a sink,
+                * depending on how they are configured.  If an ETF has been
+                * "activated" it will be configured as a sink, otherwise
+                * go ahead with the link configuration.
+                */
+               if (type == CORESIGHT_DEV_TYPE_LINKSINK)
+                       type = (csdev == coresight_get_sink(path)) ?
+                                               CORESIGHT_DEV_TYPE_SINK :
+                                               CORESIGHT_DEV_TYPE_LINK;
+
+               switch (type) {
+               case CORESIGHT_DEV_TYPE_SINK:
+                       coresight_disable_sink(csdev);
+                       break;
+               case CORESIGHT_DEV_TYPE_SOURCE:
+                       /* sources are disabled from either sysFS or Perf */
+                       break;
+               case CORESIGHT_DEV_TYPE_LINK:
+                       parent = list_prev_entry(nd, link)->csdev;
+                       child = list_next_entry(nd, link)->csdev;
+                       coresight_disable_link(csdev, parent, child);
+                       break;
+               default:
+                       break;
                }
-               if (ret)
-                       goto err;
        }
+}
 
-       return 0;
-err:
-       list_for_each_entry_continue_reverse(cd, path, path_link) {
-               if (cd == list_first_entry(path, struct coresight_device,
-                                          path_link)) {
-                       coresight_disable_sink(cd);
-               } else if (list_is_last(&cd->path_link, path)) {
-                       ;
-               } else {
-                       coresight_disable_link(cd);
+int coresight_enable_path(struct list_head *path, u32 mode)
+{
+       int ret = 0;
+       u32 type;
+       struct coresight_node *nd;
+       struct coresight_device *csdev, *parent, *child;
+
+       list_for_each_entry_reverse(nd, path, link) {
+               csdev = nd->csdev;
+               type = csdev->type;
+
+               /*
+                * ETF devices are tricky... They can be a link or a sink,
+                * depending on how they are configured.  If an ETF has been
+                * "activated" it will be configured as a sink, otherwise
+                * go ahead with the link configuration.
+                */
+               if (type == CORESIGHT_DEV_TYPE_LINKSINK)
+                       type = (csdev == coresight_get_sink(path)) ?
+                                               CORESIGHT_DEV_TYPE_SINK :
+                                               CORESIGHT_DEV_TYPE_LINK;
+
+               switch (type) {
+               case CORESIGHT_DEV_TYPE_SINK:
+                       ret = coresight_enable_sink(csdev, mode);
+                       if (ret)
+                               goto err;
+                       break;
+               case CORESIGHT_DEV_TYPE_SOURCE:
+                       /* sources are enabled from either sysFS or Perf */
+                       break;
+               case CORESIGHT_DEV_TYPE_LINK:
+                       parent = list_prev_entry(nd, link)->csdev;
+                       child = list_next_entry(nd, link)->csdev;
+                       ret = coresight_enable_link(csdev, parent, child);
+                       if (ret)
+                               goto err;
+                       break;
+               default:
+                       goto err;
                }
        }
 
+out:
        return ret;
+err:
+       coresight_disable_path(path);
+       goto out;
 }
 
-static int coresight_disable_path(struct list_head *path)
+struct coresight_device *coresight_get_sink(struct list_head *path)
 {
-       struct coresight_device *cd;
+       struct coresight_device *csdev;
 
-       list_for_each_entry_reverse(cd, path, path_link) {
-               if (cd == list_first_entry(path, struct coresight_device,
-                                          path_link)) {
-                       coresight_disable_sink(cd);
-               } else if (list_is_last(&cd->path_link, path)) {
-                       /*
-                        * The source has already been stopped, no need
-                        * to do it again here.
-                        */
-                       ;
-               } else {
-                       coresight_disable_link(cd);
+       if (!path)
+               return NULL;
+
+       csdev = list_last_entry(path, struct coresight_node, link)->csdev;
+       if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+           csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+               return NULL;
+
+       return csdev;
+}
+
+/**
+ * _coresight_build_path - recursively build a path from a @csdev to a sink.
+ * @csdev:     The device to start from.
+ * @path:      The list to add devices to.
+ *
+ * The tree of Coresight devices is traversed until an activated sink is
+ * found.  From there the sink is added to the list along with all the
+ * devices that led to that point - the end result is a list from source
+ * to sink. In that list the source is the first device and the sink the
+ * last one.
+ */
+static int _coresight_build_path(struct coresight_device *csdev,
+                                struct list_head *path)
+{
+       int i;
+       bool found = false;
+       struct coresight_node *node;
+       struct coresight_connection *conn;
+
+       /* An activated sink has been found.  Enqueue the element */
+       if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+            csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+               goto out;
+
+       /* Not a sink - recursively explore each port found on this element */
+       for (i = 0; i < csdev->nr_outport; i++) {
+               conn = &csdev->conns[i];
+               if (_coresight_build_path(conn->child_dev, path) == 0) {
+                       found = true;
+                       break;
                }
        }
 
+       if (!found)
+               return -ENODEV;
+
+out:
+       /*
+        * A path from this element to a sink has been found.  The elements
+        * leading to the sink are already enqueued, all that is left to do
+        * is tell the PM runtime core we need this element and add a node
+        * for it.
+        */
+       node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       node->csdev = csdev;
+       list_add(&node->link, path);
+       pm_runtime_get_sync(csdev->dev.parent);
+
        return 0;
 }
 
-static int coresight_build_paths(struct coresight_device *csdev,
-                                struct list_head *path,
-                                bool enable)
+struct list_head *coresight_build_path(struct coresight_device *csdev)
 {
-       int i, ret = -EINVAL;
-       struct coresight_connection *conn;
+       struct list_head *path;
 
-       list_add(&csdev->path_link, path);
+       path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+       if (!path)
+               return NULL;
 
-       if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
-           csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
-           csdev->activated) {
-               if (enable)
-                       ret = coresight_enable_path(path);
-               else
-                       ret = coresight_disable_path(path);
-       } else {
-               for (i = 0; i < csdev->nr_outport; i++) {
-                       conn = &csdev->conns[i];
-                       if (coresight_build_paths(conn->child_dev,
-                                                   path, enable) == 0)
-                               ret = 0;
-               }
+       INIT_LIST_HEAD(path);
+
+       if (_coresight_build_path(csdev, path)) {
+               kfree(path);
+               path = NULL;
+       }
+
+       return path;
+}
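/*
 * A usage sketch tying the path helpers together: build a path from a
 * source, enable it, trace, then tear everything down again.
 */
struct list_head *path = coresight_build_path(csdev);

if (path) {
	if (!coresight_enable_path(path, CS_MODE_SYSFS)) {
		/* ... trace session runs here ... */
		coresight_disable_path(path);
	}
	coresight_release_path(path);
}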
+
+/**
+ * coresight_release_path - release a previously built path.
+ * @path:      the path to release.
+ *
+ * Go through all the elements of a path and 1) remove them from the list and
+ * 2) free the memory allocated for each node.
+ */
+void coresight_release_path(struct list_head *path)
+{
+       struct coresight_device *csdev;
+       struct coresight_node *nd, *next;
+
+       list_for_each_entry_safe(nd, next, path, link) {
+               csdev = nd->csdev;
+
+               pm_runtime_put_sync(csdev->dev.parent);
+               list_del(&nd->link);
+               kfree(nd);
        }
 
-       if (list_first_entry(path, struct coresight_device, path_link) != csdev)
-               dev_err(&csdev->dev, "wrong device in %s\n", __func__);
+       kfree(path);
+       path = NULL;
+}
 
-       list_del(&csdev->path_link);
+/**
+ * coresight_validate_source - make sure a source has the right credentials
+ *  @csdev:    the device structure for a source.
+ *  @function: the function this was called from.
+ *
+ * Assumes the coresight_mutex is held.
+ */
+static int coresight_validate_source(struct coresight_device *csdev,
+                                    const char *function)
+{
+       u32 type, subtype;
 
-       return ret;
+       type = csdev->type;
+       subtype = csdev->subtype.source_subtype;
+
+       if (type != CORESIGHT_DEV_TYPE_SOURCE) {
+               dev_err(&csdev->dev, "wrong device type in %s\n", function);
+               return -EINVAL;
+       }
+
+       if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
+           subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE) {
+               dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int coresight_enable(struct coresight_device *csdev)
 {
-       int ret = 0;
-       LIST_HEAD(path);
+       int cpu, ret = 0;
+       struct list_head *path;
 
        mutex_lock(&coresight_mutex);
-       if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
-               ret = -EINVAL;
-               dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
+
+       ret = coresight_validate_source(csdev, __func__);
+       if (ret)
                goto out;
-       }
+
        if (csdev->enable)
                goto out;
 
-       if (coresight_build_paths(csdev, &path, true)) {
-               dev_err(&csdev->dev, "building path(s) failed\n");
+       path = coresight_build_path(csdev);
+       if (!path) {
+               pr_err("building path(s) failed\n");
                goto out;
        }
 
-       if (coresight_enable_source(csdev))
-               dev_err(&csdev->dev, "source enable failed\n");
+       ret = coresight_enable_path(path, CS_MODE_SYSFS);
+       if (ret)
+               goto err_path;
+
+       ret = coresight_enable_source(csdev, CS_MODE_SYSFS);
+       if (ret)
+               goto err_source;
+
+       switch (csdev->subtype.source_subtype) {
+       case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
+               /*
+                * When working from sysFS it is important to keep track
+                * of the paths that were created so that they can be
+                * undone in 'coresight_disable()'.  Since there can only
+                * be a single session per tracer (when working from sysFS)
+                * a per-cpu variable will do just fine.
+                */
+               cpu = source_ops(csdev)->cpu_id(csdev);
+               per_cpu(tracer_path, cpu) = path;
+               break;
+       case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+               stm_path = path;
+               break;
+       default:
+               /* We can't be here */
+               break;
+       }
+
 out:
        mutex_unlock(&coresight_mutex);
        return ret;
+
+err_source:
+       coresight_disable_path(path);
+
+err_path:
+       coresight_release_path(path);
+       goto out;
 }
 EXPORT_SYMBOL_GPL(coresight_enable);
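/*
 * Thanks to the per-cpu (and STM) path bookkeeping above, callers only
 * need to pair enable and disable; the disable side recovers the path
 * on its own.  Sketch:
 */
if (!coresight_enable(csdev)) {
	/* ... tracing ... */
	coresight_disable(csdev);	/* looks up the stashed path itself */
}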
 
 void coresight_disable(struct coresight_device *csdev)
 {
-       LIST_HEAD(path);
+       int cpu, ret;
+       struct list_head *path = NULL;
 
        mutex_lock(&coresight_mutex);
-       if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
-               dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
+
+       ret = coresight_validate_source(csdev, __func__);
+       if (ret)
                goto out;
-       }
+
        if (!csdev->enable)
                goto out;
 
+       switch (csdev->subtype.source_subtype) {
+       case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
+               cpu = source_ops(csdev)->cpu_id(csdev);
+               path = per_cpu(tracer_path, cpu);
+               per_cpu(tracer_path, cpu) = NULL;
+               break;
+       case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+               path = stm_path;
+               stm_path = NULL;
+               break;
+       default:
+               /* We can't be here */
+               break;
+       }
+
        coresight_disable_source(csdev);
-       if (coresight_build_paths(csdev, &path, false))
-               dev_err(&csdev->dev, "releasing path(s) failed\n");
+       coresight_disable_path(path);
+       coresight_release_path(path);
 
 out:
        mutex_unlock(&coresight_mutex);
@@ -387,7 +596,7 @@ static ssize_t enable_sink_show(struct device *dev,
 {
        struct coresight_device *csdev = to_coresight_device(dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated);
+       return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->activated);
 }
 
 static ssize_t enable_sink_store(struct device *dev,
@@ -417,7 +626,7 @@ static ssize_t enable_source_show(struct device *dev,
 {
        struct coresight_device *csdev = to_coresight_device(dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
+       return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->enable);
 }
 
 static ssize_t enable_source_store(struct device *dev,
@@ -481,6 +690,8 @@ static void coresight_device_release(struct device *dev)
 {
        struct coresight_device *csdev = to_coresight_device(dev);
 
+       kfree(csdev->conns);
+       kfree(csdev->refcnt);
        kfree(csdev);
 }
 
@@ -536,7 +747,7 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
         * are hooked-up with each newly added component.
         */
        bus_for_each_dev(&coresight_bustype, NULL,
-                                csdev, coresight_orphan_match);
+                        csdev, coresight_orphan_match);
 }
 
 
@@ -568,6 +779,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
 
                if (dev) {
                        conn->child_dev = to_coresight_device(dev);
+                       /* and put reference from 'bus_find_device()' */
+                       put_device(dev);
                } else {
                        csdev->orphan = true;
                        conn->child_dev = NULL;
@@ -575,6 +788,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
        }
 }
 
+static int coresight_remove_match(struct device *dev, void *data)
+{
+       int i;
+       struct coresight_device *csdev, *iterator;
+       struct coresight_connection *conn;
+
+       csdev = data;
+       iterator = to_coresight_device(dev);
+
+       /* No need to check oneself */
+       if (csdev == iterator)
+               return 0;
+
+       /*
+        * Cycle through all the connections of that component.  If we find
+        * a connection whose name matches @csdev, remove it.
+        */
+       for (i = 0; i < iterator->nr_outport; i++) {
+               conn = &iterator->conns[i];
+
+               if (conn->child_dev == NULL)
+                       continue;
+
+               if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+                       iterator->orphan = true;
+                       conn->child_dev = NULL;
+                       /* No need to continue */
+                       break;
+               }
+       }
+
+       /*
+        * Returning '0' ensures that all known components on the
+        * bus will be checked.
+        */
+       return 0;
+}
+
+static void coresight_remove_conns(struct coresight_device *csdev)
+{
+       bus_for_each_dev(&coresight_bustype, NULL,
+                        csdev, coresight_remove_match);
+}
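/*
 * For reference, the traversal contract that coresight_fixup_orphan_conns()
 * and coresight_remove_conns() both rely on: bus_for_each_dev() invokes @fn
 * for every device on @bus and keeps iterating for as long as @fn returns 0.
 */
int bus_for_each_dev(struct bus_type *bus, struct device *start,
		     void *data, int (*fn)(struct device *dev, void *data));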
+
 /**
  * coresight_timeout - loop until a bit has changed to a specific state.
  * @addr: base address of the area of interest.
@@ -713,13 +970,8 @@ EXPORT_SYMBOL_GPL(coresight_register);
 
 void coresight_unregister(struct coresight_device *csdev)
 {
-       mutex_lock(&coresight_mutex);
-
-       kfree(csdev->conns);
+       /* Remove references to that device in the topology */
+       coresight_remove_conns(csdev);
        device_unregister(&csdev->dev);
-
-       mutex_unlock(&coresight_mutex);
 }
 EXPORT_SYMBOL_GPL(coresight_unregister);
-
-MODULE_LICENSE("GPL v2");
index b0973617826f62b41b31072a7ca3a7499319d4ad..b68da1888fd515879a43df8f6173dd77cdb8754e 100644 (file)
@@ -10,7 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/err.h>
 #include <linux/slab.h>
@@ -86,7 +85,7 @@ static int of_coresight_alloc_memory(struct device *dev,
                return -ENOMEM;
 
        /* Children connected to this component via @outports */
-        pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+       pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
                                          sizeof(*pdata->child_names),
                                          GFP_KERNEL);
        if (!pdata->child_names)
index e7a348807f0cb2ce91fd48e2d133f034830763be..847a39b353078618fa9009691598b4ed1d5ef583 100644 (file)
@@ -9,6 +9,8 @@ config STM
 
          Say Y here to enable System Trace Module device support.
 
+if STM
+
 config STM_DUMMY
        tristate "Dummy STM driver"
        help
@@ -25,3 +27,16 @@ config STM_SOURCE_CONSOLE
 
          If you want to send kernel console messages over STM devices,
          say Y.
+
+config STM_SOURCE_HEARTBEAT
+       tristate "Heartbeat over STM devices"
+       help
+         This is a kernel space trace source that sends periodic
+         heartbeat messages to trace hosts over STM devices. It is
+         also useful for testing stm class drivers and the stm class
+         framework itself.
+
+         If you want to send heartbeat messages over STM devices,
+         say Y.
+
+endif
index f9312c38dd7a8bcbfd1fce502c600229f4ff2930..a9ce3d487e5787d18eafddd06fa1be1b611ce457 100644 (file)
@@ -5,5 +5,7 @@ stm_core-y              := core.o policy.o
 obj-$(CONFIG_STM_DUMMY)        += dummy_stm.o
 
 obj-$(CONFIG_STM_SOURCE_CONSOLE)       += stm_console.o
+obj-$(CONFIG_STM_SOURCE_HEARTBEAT)     += stm_heartbeat.o
 
 stm_console-y          := console.o
+stm_heartbeat-y        := heartbeat.o
index b6445d9e54533d224a89fbe98ad7fcfad52ac19d..02095410cb338ecd9bc6d1bc8f9b6619d096254d 100644 (file)
@@ -67,9 +67,24 @@ static ssize_t channels_show(struct device *dev,
 
 static DEVICE_ATTR_RO(channels);
 
+static ssize_t hw_override_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct stm_device *stm = to_stm_device(dev);
+       int ret;
+
+       ret = sprintf(buf, "%u\n", stm->data->hw_override);
+
+       return ret;
+}
+
+static DEVICE_ATTR_RO(hw_override);
+
 static struct attribute *stm_attrs[] = {
        &dev_attr_masters.attr,
        &dev_attr_channels.attr,
+       &dev_attr_hw_override.attr,
        NULL,
 };
 
@@ -113,6 +128,7 @@ struct stm_device *stm_find_device(const char *buf)
 
        stm = to_stm_device(dev);
        if (!try_module_get(stm->owner)) {
+               /* matches class_find_device() above */
                put_device(dev);
                return NULL;
        }
@@ -125,7 +141,7 @@ struct stm_device *stm_find_device(const char *buf)
  * @stm:       stm device, previously acquired by stm_find_device()
  *
  * This drops the module reference and device reference taken by
- * stm_find_device().
+ * stm_find_device() or stm_char_open().
  */
 void stm_put_device(struct stm_device *stm)
 {
@@ -185,6 +201,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
 {
        struct stp_master *master = stm_master(stm, output->master);
 
+       lockdep_assert_held(&stm->mc_lock);
+       lockdep_assert_held(&output->lock);
+
        if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
                return;
 
@@ -199,6 +218,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
 {
        struct stp_master *master = stm_master(stm, output->master);
 
+       lockdep_assert_held(&stm->mc_lock);
+       lockdep_assert_held(&output->lock);
+
        bitmap_release_region(&master->chan_map[0], output->channel,
                              ilog2(output->nr_chans));
 
@@ -288,6 +310,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
        }
 
        spin_lock(&stm->mc_lock);
+       spin_lock(&output->lock);
        /* output is already assigned -- shouldn't happen */
        if (WARN_ON_ONCE(output->nr_chans))
                goto unlock;
@@ -304,6 +327,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
 
        ret = 0;
 unlock:
+       spin_unlock(&output->lock);
        spin_unlock(&stm->mc_lock);
 
        return ret;
@@ -312,11 +336,18 @@ unlock:
 static void stm_output_free(struct stm_device *stm, struct stm_output *output)
 {
        spin_lock(&stm->mc_lock);
+       spin_lock(&output->lock);
        if (output->nr_chans)
                stm_output_disclaim(stm, output);
+       spin_unlock(&output->lock);
        spin_unlock(&stm->mc_lock);
 }
 
+static void stm_output_init(struct stm_output *output)
+{
+       spin_lock_init(&output->lock);
+}
+
 static int major_match(struct device *dev, const void *data)
 {
        unsigned int major = *(unsigned int *)data;
@@ -339,6 +370,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
        if (!stmf)
                return -ENOMEM;
 
+       stm_output_init(&stmf->output);
        stmf->stm = to_stm_device(dev);
 
        if (!try_module_get(stmf->stm->owner))
@@ -349,6 +381,8 @@ static int stm_char_open(struct inode *inode, struct file *file)
        return nonseekable_open(inode, file);
 
 err_free:
+       /* matches class_find_device() above */
+       put_device(dev);
        kfree(stmf);
 
        return err;
@@ -357,9 +391,19 @@ err_free:
 static int stm_char_release(struct inode *inode, struct file *file)
 {
        struct stm_file *stmf = file->private_data;
+       struct stm_device *stm = stmf->stm;
+
+       if (stm->data->unlink)
+               stm->data->unlink(stm->data, stmf->output.master,
+                                 stmf->output.channel);
+
+       stm_output_free(stm, &stmf->output);
 
-       stm_output_free(stmf->stm, &stmf->output);
-       stm_put_device(stmf->stm);
+       /*
+        * matches the stm_char_open()'s
+        * class_find_device() + try_module_get()
+        */
+       stm_put_device(stm);
        kfree(stmf);
 
        return 0;
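
The release path above mirrors the references taken in stm_char_open():
class_find_device() pins the device and try_module_get() pins the
owning module, and stm_put_device() drops both. Condensed (a sketch of
the pairing, not new code):

        /*
         * open:                          release:
         *   dev = class_find_device()      stm_put_device(stm)
         *     -> +1 device ref               -> -1 module ref
         *   try_module_get(owner)            -> -1 device ref
         *     -> +1 module ref
         *
         * every get on the open path has exactly one put on the
         * release (or open-failure) path.
         */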
@@ -380,8 +424,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
        return ret;
 }
 
-static void stm_write(struct stm_data *data, unsigned int master,
-                     unsigned int channel, const char *buf, size_t count)
+static ssize_t stm_write(struct stm_data *data, unsigned int master,
+                         unsigned int channel, const char *buf, size_t count)
 {
        unsigned int flags = STP_PACKET_TIMESTAMPED;
        const unsigned char *p = buf, nil = 0;
@@ -393,9 +437,14 @@ static void stm_write(struct stm_data *data, unsigned int master,
                sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
                                  sz, p);
                flags = 0;
+
+               if (sz < 0)
+                       break;
        }
 
        data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+
+       return pos;
 }
 
 static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -406,6 +455,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
        char *kbuf;
        int err;
 
+       if (count + 1 > PAGE_SIZE)
+               count = PAGE_SIZE - 1;
+
        /*
         * if no m/c have been assigned to this writer up to this
         * point, use "default" policy entry
@@ -430,8 +482,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
                return -EFAULT;
        }
 
-       stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf,
-                 count);
+       count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
+                         kbuf, count);
 
        kfree(kbuf);
 
@@ -509,16 +561,12 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
        if (ret)
                goto err_free;
 
-       ret = 0;
-
        if (stm->data->link)
                ret = stm->data->link(stm->data, stmf->output.master,
                                      stmf->output.channel);
 
-       if (ret) {
+       if (ret)
                stm_output_free(stmf->stm, &stmf->output);
-               stm_put_device(stmf->stm);
-       }
 
 err_free:
        kfree(id);
@@ -633,17 +681,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
        stm->dev.parent = parent;
        stm->dev.release = stm_device_release;
 
-       err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
-       if (err)
-               goto err_device;
-
-       err = device_add(&stm->dev);
-       if (err)
-               goto err_device;
-
+       mutex_init(&stm->link_mutex);
        spin_lock_init(&stm->link_lock);
        INIT_LIST_HEAD(&stm->link_list);
 
+       /* initialize the object before it is accessible via sysfs */
        spin_lock_init(&stm->mc_lock);
        mutex_init(&stm->policy_mutex);
        stm->sw_nmasters = nmasters;
@@ -651,9 +693,20 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
        stm->data = stm_data;
        stm_data->stm = stm;
 
+       err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
+       if (err)
+               goto err_device;
+
+       err = device_add(&stm->dev);
+       if (err)
+               goto err_device;
+
        return 0;
 
 err_device:
+       unregister_chrdev(stm->major, stm_data->name);
+
+       /* matches device_initialize() above */
        put_device(&stm->dev);
 err_free:
        kfree(stm);
@@ -662,20 +715,28 @@ err_free:
 }
 EXPORT_SYMBOL_GPL(stm_register_device);
 
-static void __stm_source_link_drop(struct stm_source_device *src,
-                                  struct stm_device *stm);
+static int __stm_source_link_drop(struct stm_source_device *src,
+                                 struct stm_device *stm);
 
 void stm_unregister_device(struct stm_data *stm_data)
 {
        struct stm_device *stm = stm_data->stm;
        struct stm_source_device *src, *iter;
-       int i;
+       int i, ret;
 
-       spin_lock(&stm->link_lock);
+       mutex_lock(&stm->link_mutex);
        list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
-               __stm_source_link_drop(src, stm);
+               ret = __stm_source_link_drop(src, stm);
+               /*
+                * src <-> stm link must not change under the same
+                * stm::link_mutex, so complain loudly if it has;
+                * also in this situation ret!=0 means this src is
+                * not connected to this stm and it should be otherwise
+                * safe to proceed with the tear-down of stm.
+                */
+               WARN_ON_ONCE(ret);
        }
-       spin_unlock(&stm->link_lock);
+       mutex_unlock(&stm->link_mutex);
 
        synchronize_srcu(&stm_source_srcu);
 
@@ -694,6 +755,17 @@ void stm_unregister_device(struct stm_data *stm_data)
 }
 EXPORT_SYMBOL_GPL(stm_unregister_device);
 
+/*
+ * stm::link_list access serialization uses a spinlock and a mutex; holding
+ * either of them guarantees that the list is stable; modification requires
+ * holding both of them.
+ *
+ * Lock ordering is as follows:
+ *   stm::link_mutex
+ *     stm::link_lock
+ *       src::link_lock
+ */
+
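
Illustrating the discipline above (a sketch, not part of this patch):
readers may take either lock to pin the list, while writers must nest
all of them in the stated order:

        /* reader: either lock keeps stm::link_list stable */
        spin_lock(&stm->link_lock);
        list_for_each_entry(src, &stm->link_list, link_entry)
                /* ... */;
        spin_unlock(&stm->link_lock);

        /* writer: both list locks, mutex taken first */
        mutex_lock(&stm->link_mutex);
        spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);
        list_del_init(&src->link_entry);
        spin_unlock(&src->link_lock);
        spin_unlock(&stm->link_lock);
        mutex_unlock(&stm->link_mutex);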
 /**
  * stm_source_link_add() - connect an stm_source device to an stm device
  * @src:       stm_source device
@@ -710,6 +782,7 @@ static int stm_source_link_add(struct stm_source_device *src,
        char *id;
        int err;
 
+       mutex_lock(&stm->link_mutex);
        spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);
 
@@ -719,6 +792,7 @@ static int stm_source_link_add(struct stm_source_device *src,
 
        spin_unlock(&src->link_lock);
        spin_unlock(&stm->link_lock);
+       mutex_unlock(&stm->link_mutex);
 
        id = kstrdup(src->data->name, GFP_KERNEL);
        if (id) {
@@ -753,9 +827,9 @@ static int stm_source_link_add(struct stm_source_device *src,
 
 fail_free_output:
        stm_output_free(stm, &src->output);
-       stm_put_device(stm);
 
 fail_detach:
+       mutex_lock(&stm->link_mutex);
        spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);
 
@@ -764,6 +838,7 @@ fail_detach:
 
        spin_unlock(&src->link_lock);
        spin_unlock(&stm->link_lock);
+       mutex_unlock(&stm->link_mutex);
 
        return err;
 }
@@ -776,28 +851,55 @@ fail_detach:
  * If @stm is @src::link, disconnect them from one another and put the
  * reference on the @stm device.
  *
- * Caller must hold stm::link_lock.
+ * Caller must hold stm::link_mutex.
  */
-static void __stm_source_link_drop(struct stm_source_device *src,
-                                  struct stm_device *stm)
+static int __stm_source_link_drop(struct stm_source_device *src,
+                                 struct stm_device *stm)
 {
        struct stm_device *link;
+       int ret = 0;
+
+       lockdep_assert_held(&stm->link_mutex);
 
+       /* for stm::link_list modification, we hold both mutex and spinlock */
+       spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);
        link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
-       if (WARN_ON_ONCE(link != stm)) {
-               spin_unlock(&src->link_lock);
-               return;
+
+       /*
+        * The linked device may have changed since we last looked, because
+        * we weren't holding the src::link_lock back then; if this is the
+        * case, tell the caller to retry.
+        */
+       if (link != stm) {
+               ret = -EAGAIN;
+               goto unlock;
        }
 
        stm_output_free(link, &src->output);
-       /* caller must hold stm::link_lock */
        list_del_init(&src->link_entry);
        /* matches stm_find_device() from stm_source_link_store() */
        stm_put_device(link);
        rcu_assign_pointer(src->link, NULL);
 
+unlock:
        spin_unlock(&src->link_lock);
+       spin_unlock(&stm->link_lock);
+
+       /*
+        * Call the unlink callbacks for both source and stm, when we know
+        * that we have actually performed the unlinking.
+        */
+       if (!ret) {
+               if (src->data->unlink)
+                       src->data->unlink(src->data);
+
+               if (stm->data->unlink)
+                       stm->data->unlink(stm->data, src->output.master,
+                                         src->output.channel);
+       }
+
+       return ret;
 }
 
 /**
@@ -813,21 +915,29 @@ static void __stm_source_link_drop(struct stm_source_device *src,
 static void stm_source_link_drop(struct stm_source_device *src)
 {
        struct stm_device *stm;
-       int idx;
+       int idx, ret;
 
+retry:
        idx = srcu_read_lock(&stm_source_srcu);
+       /*
+        * The stm device will be valid for the duration of this
+        * read section, but the link may change before we grab
+        * the src::link_lock in __stm_source_link_drop().
+        */
        stm = srcu_dereference(src->link, &stm_source_srcu);
 
+       ret = 0;
        if (stm) {
-               if (src->data->unlink)
-                       src->data->unlink(src->data);
-
-               spin_lock(&stm->link_lock);
-               __stm_source_link_drop(src, stm);
-               spin_unlock(&stm->link_lock);
+               mutex_lock(&stm->link_mutex);
+               ret = __stm_source_link_drop(src, stm);
+               mutex_unlock(&stm->link_mutex);
        }
 
        srcu_read_unlock(&stm_source_srcu, idx);
+
+       /* if it did change, retry */
+       if (ret == -EAGAIN)
+               goto retry;
 }
 
 static ssize_t stm_source_link_show(struct device *dev,
@@ -862,8 +972,10 @@ static ssize_t stm_source_link_store(struct device *dev,
                return -EINVAL;
 
        err = stm_source_link_add(src, link);
-       if (err)
+       if (err) {
+               /* matches the stm_find_device() above */
                stm_put_device(link);
+       }
 
        return err ? : count;
 }
@@ -925,6 +1037,7 @@ int stm_source_register_device(struct device *parent,
        if (err)
                goto err;
 
+       stm_output_init(&src->output);
        spin_lock_init(&src->link_lock);
        INIT_LIST_HEAD(&src->link_entry);
        src->data = data;
@@ -973,9 +1086,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan,
 
        stm = srcu_dereference(src->link, &stm_source_srcu);
        if (stm)
-               stm_write(stm->data, src->output.master,
-                         src->output.channel + chan,
-                         buf, count);
+               count = stm_write(stm->data, src->output.master,
+                                 src->output.channel + chan,
+                                 buf, count);
        else
                count = -ENODEV;
 
index 3709bef0b21ff2d4ffafe29347e1f9872fbe17de..a86612d989f963ca4f31db8b39002351abab76ec 100644 (file)
@@ -40,22 +40,71 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
        return size;
 }
 
-static struct stm_data dummy_stm = {
-       .name           = "dummy_stm",
-       .sw_start       = 0x0000,
-       .sw_end         = 0xffff,
-       .sw_nchannels   = 0xffff,
-       .packet         = dummy_stm_packet,
-};
+#define DUMMY_STM_MAX 32
+
+static struct stm_data dummy_stm[DUMMY_STM_MAX];
+
+static int nr_dummies = 4;
+
+module_param(nr_dummies, int, 0400);
+
+static unsigned int fail_mode;
+
+module_param(fail_mode, int, 0600);
+
+static int dummy_stm_link(struct stm_data *data, unsigned int master,
+                         unsigned int channel)
+{
+       if (fail_mode && (channel & fail_mode))
+               return -EINVAL;
+
+       return 0;
+}
 
 static int dummy_stm_init(void)
 {
-       return stm_register_device(NULL, &dummy_stm, THIS_MODULE);
+       int i, ret = -ENOMEM;
+
+       if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX)
+               return -EINVAL;
+
+       for (i = 0; i < nr_dummies; i++) {
+               dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
+               if (!dummy_stm[i].name)
+                       goto fail_unregister;
+
+               dummy_stm[i].sw_start           = 0x0000;
+               dummy_stm[i].sw_end             = 0xffff;
+               dummy_stm[i].sw_nchannels       = 0xffff;
+               dummy_stm[i].packet             = dummy_stm_packet;
+               dummy_stm[i].link               = dummy_stm_link;
+
+               ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE);
+               if (ret)
+                       goto fail_free;
+       }
+
+       return 0;
+
+fail_unregister:
+       for (i--; i >= 0; i--) {
+               stm_unregister_device(&dummy_stm[i]);
+fail_free:
+               kfree(dummy_stm[i].name);
+       }
+
+       return ret;
+
 }
 
 static void dummy_stm_exit(void)
 {
-       stm_unregister_device(&dummy_stm);
+       int i;
+
+       for (i = 0; i < nr_dummies; i++) {
+               stm_unregister_device(&dummy_stm[i]);
+               kfree(dummy_stm[i].name);
+       }
 }
 
 module_init(dummy_stm_init);
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
new file mode 100644 (file)
index 0000000..3da7b67
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Simple heartbeat STM source driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * Heartbeat STM source will send repetitive messages over STM devices to a
+ * trace host.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/stm.h>
+
+#define STM_HEARTBEAT_MAX      32
+
+static int nr_devs = 4;
+static int interval_ms = 10;
+
+module_param(nr_devs, int, 0400);
+module_param(interval_ms, int, 0600);
+
+static struct stm_heartbeat {
+       struct stm_source_data  data;
+       struct hrtimer          hrtimer;
+       unsigned int            active;
+} stm_heartbeat[STM_HEARTBEAT_MAX];
+
+static const char str[] = "heartbeat stm source driver is here to serve you";
+
+static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
+{
+       struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat,
+                                                      hrtimer);
+
+       stm_source_write(&heartbeat->data, 0, str, sizeof str);
+       if (heartbeat->active)
+               hrtimer_forward_now(hr, ms_to_ktime(interval_ms));
+
+       return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int stm_heartbeat_link(struct stm_source_data *data)
+{
+       struct stm_heartbeat *heartbeat =
+               container_of(data, struct stm_heartbeat, data);
+
+       heartbeat->active = 1;
+       hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms),
+                     HRTIMER_MODE_ABS);
+
+       return 0;
+}
+
+static void stm_heartbeat_unlink(struct stm_source_data *data)
+{
+       struct stm_heartbeat *heartbeat =
+               container_of(data, struct stm_heartbeat, data);
+
+       heartbeat->active = 0;
+       hrtimer_cancel(&heartbeat->hrtimer);
+}
+
+static int stm_heartbeat_init(void)
+{
+       int i, ret = -ENOMEM;
+
+       if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
+               return -EINVAL;
+
+       for (i = 0; i < nr_devs; i++) {
+               stm_heartbeat[i].data.name =
+                       kasprintf(GFP_KERNEL, "heartbeat.%d", i);
+               if (!stm_heartbeat[i].data.name)
+                       goto fail_unregister;
+
+               stm_heartbeat[i].data.nr_chans  = 1;
+               stm_heartbeat[i].data.link      = stm_heartbeat_link;
+               stm_heartbeat[i].data.unlink    = stm_heartbeat_unlink;
+               hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
+                            HRTIMER_MODE_ABS);
+               stm_heartbeat[i].hrtimer.function =
+                       stm_heartbeat_hrtimer_handler;
+
+               ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
+               if (ret)
+                       goto fail_free;
+       }
+
+       return 0;
+
+fail_unregister:
+       for (i--; i >= 0; i--) {
+               stm_source_unregister_device(&stm_heartbeat[i].data);
+fail_free:
+               kfree(stm_heartbeat[i].data.name);
+       }
+
+       return ret;
+}
+
+static void stm_heartbeat_exit(void)
+{
+       int i;
+
+       for (i = 0; i < nr_devs; i++) {
+               stm_source_unregister_device(&stm_heartbeat[i].data);
+               kfree(stm_heartbeat[i].data.name);
+       }
+}
+
+module_init(stm_heartbeat_init);
+module_exit(stm_heartbeat_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_heartbeat driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
index 11ab6d01adf63d1490c8474801d358ada4f657a8..1c061cb9bff05ff7a0923dca13bbc1b5a2104c3b 100644 (file)
@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy)
 {
        struct stm_device *stm = policy->stm;
 
+       /*
+        * stp_policy_release() will not end up here if the policy is already
+        * unbound; other users should not either, as no link exists between
+        * this policy and anything else in that case
+        */
        if (WARN_ON_ONCE(!policy->stm))
                return;
 
-       mutex_lock(&stm->policy_mutex);
-       stm->policy = NULL;
-       mutex_unlock(&stm->policy_mutex);
+       lockdep_assert_held(&stm->policy_mutex);
 
+       stm->policy = NULL;
        policy->stm = NULL;
 
        stm_put_device(stm);
@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy)
 static void stp_policy_release(struct config_item *item)
 {
        struct stp_policy *policy = to_stp_policy(item);
+       struct stm_device *stm = policy->stm;
 
+       /* a policy *can* be unbound and still exist in configfs tree */
+       if (!stm)
+               return;
+
+       mutex_lock(&stm->policy_mutex);
        stp_policy_unbind(policy);
+       mutex_unlock(&stm->policy_mutex);
+
        kfree(policy);
 }
 
@@ -320,16 +332,17 @@ stp_policies_make(struct config_group *group, const char *name)
 
        /*
         * node must look like <device_name>.<policy_name>, where
-        * <device_name> is the name of an existing stm device and
-        * <policy_name> is an arbitrary string
+        * <device_name> is the name of an existing stm device; may
+        *               contain dots;
+        * <policy_name> is an arbitrary string; may not contain dots
         */
-       p = strchr(devname, '.');
+       p = strrchr(devname, '.');
        if (!p) {
                kfree(devname);
                return ERR_PTR(-EINVAL);
        }
 
-       *p++ = '\0';
+       *p = '\0';
 
        stm = stm_find_device(devname);
        kfree(devname);
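
Since strrchr() finds the last dot, device names that themselves
contain dots (such as the dummy_stm.%d instances added above) now parse
correctly. A quick illustration with a hypothetical node name:

        char devname[] = "dummy_stm.0.my-policy";
        char *p = strrchr(devname, '.');        /* last '.' wins */

        *p = '\0';
        /* devname is now "dummy_stm.0"  -> device name, dots allowed  */
        /* p + 1  is      "my-policy"    -> policy name, no dots       */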
index 95ece0292c991c8ad60f81c4a15fe4f3687d4504..4e8c6926260f3e8eec0ec8da70de8f13f86f0cc6 100644 (file)
@@ -45,6 +45,7 @@ struct stm_device {
        int                     major;
        unsigned int            sw_nmasters;
        struct stm_data         *data;
+       struct mutex            link_mutex;
        spinlock_t              link_lock;
        struct list_head        link_list;
        /* master allocation */
@@ -56,6 +57,7 @@ struct stm_device {
        container_of((_d), struct stm_device, dev)
 
 struct stm_output {
+       spinlock_t              lock;
        unsigned int            master;
        unsigned int            channel;
        unsigned int            nr_chans;
index 11fdadc68e53e57722b4d58892fdf2c644beba34..2a6eaf1122b4e9b742eb3777fb4b6b317c07b201 100644 (file)
@@ -103,6 +103,7 @@ enum ctype {
        CT_EXEC_USERSPACE,
        CT_ACCESS_USERSPACE,
        CT_WRITE_RO,
+       CT_WRITE_RO_AFTER_INIT,
        CT_WRITE_KERN,
 };
 
@@ -140,6 +141,7 @@ static char* cp_type[] = {
        "EXEC_USERSPACE",
        "ACCESS_USERSPACE",
        "WRITE_RO",
+       "WRITE_RO_AFTER_INIT",
        "WRITE_KERN",
 };
 
@@ -162,6 +164,7 @@ static DEFINE_SPINLOCK(lock_me_up);
 static u8 data_area[EXEC_SIZE];
 
 static const unsigned long rodata = 0xAA55AA55;
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
 
 module_param(recur_count, int, 0644);
 MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
@@ -503,11 +506,28 @@ static void lkdtm_do_action(enum ctype which)
                break;
        }
        case CT_WRITE_RO: {
-               unsigned long *ptr;
+               /* Explicitly cast away "const" for the test. */
+               unsigned long *ptr = (unsigned long *)&rodata;
 
-               ptr = (unsigned long *)&rodata;
+               pr_info("attempting bad rodata write at %p\n", ptr);
+               *ptr ^= 0xabcd1234;
 
-               pr_info("attempting bad write at %p\n", ptr);
+               break;
+       }
+       case CT_WRITE_RO_AFTER_INIT: {
+               unsigned long *ptr = &ro_after_init;
+
+               /*
+                * Verify we were written to during init. Since an Oops
+                * is considered a "success", a failure here means we just
+                * skip the real test.
+                */
+               if ((*ptr & 0xAA) != 0xAA) {
+                       pr_info("%p was NOT written during init!?\n", ptr);
+                       break;
+               }
+
+               pr_info("attempting bad ro_after_init write at %p\n", ptr);
                *ptr ^= 0xabcd1234;
 
                break;
@@ -817,6 +837,9 @@ static int __init lkdtm_module_init(void)
        int n_debugfs_entries = 1; /* Assume only the direct entry */
        int i;
 
+       /* Make sure we can write to __ro_after_init values during __init */
+       ro_after_init |= 0xAA;
+
        /* Register debugfs interface */
        lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
        if (!lkdtm_debugfs_root) {
index 655f79db7899ffd0628714d51203847630a8075c..3e90bce70545a759b415081050d0762f4ae3c640 100644 (file)
@@ -760,6 +760,16 @@ const void * __init of_flat_dt_match_machine(const void *default_match,
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
+#ifndef __early_init_dt_declare_initrd
+static void __early_init_dt_declare_initrd(unsigned long start,
+                                          unsigned long end)
+{
+       initrd_start = (unsigned long)__va(start);
+       initrd_end = (unsigned long)__va(end);
+       initrd_below_start_ok = 1;
+}
+#endif
+
 /**
  * early_init_dt_check_for_initrd - Decode initrd location from flat tree
  * @node: reference to node containing initrd location ('chosen')
@@ -782,9 +792,7 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
                return;
        end = of_read_number(prop, len/4);
 
-       initrd_start = (unsigned long)__va(start);
-       initrd_end = (unsigned long)__va(end);
-       initrd_below_start_ok = 1;
+       __early_init_dt_declare_initrd(start, end);
 
        pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n",
                 (unsigned long long)start, (unsigned long long)end);
@@ -976,13 +984,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
+#ifndef MIN_MEMBLOCK_ADDR
+#define MIN_MEMBLOCK_ADDR      __pa(PAGE_OFFSET)
+#endif
 #ifndef MAX_MEMBLOCK_ADDR
 #define MAX_MEMBLOCK_ADDR      ((phys_addr_t)~0)
 #endif
 
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-       const u64 phys_offset = __pa(PAGE_OFFSET);
+       const u64 phys_offset = MIN_MEMBLOCK_ADDR;
 
        if (!PAGE_ALIGNED(base)) {
                if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
index 1cbb8338edf391bd83c4d1b0bc0dff2cbbe56e75..827e4d3bbc7a46ef59222651a8020234addc82cb 100644 (file)
@@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 #endif
 
 /* Return a pointer with offset calculated */
-#define __set_fixmap_offset(idx, phys, flags)                \
-({                                                           \
-       unsigned long addr;                                   \
-       __set_fixmap(idx, phys, flags);                       \
-       addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
-       addr;                                                 \
+#define __set_fixmap_offset(idx, phys, flags)                          \
+({                                                                     \
+       unsigned long ________addr;                                     \
+       __set_fixmap(idx, phys, flags);                                 \
+       ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1));   \
+       ________addr;                                                   \
 })
 
 #define set_fixmap_offset(idx, phys) \
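
The underscore-heavy temporary guards against statement-expression
shadowing: with the old spelling, a caller whose 'phys' argument
mentioned a variable named 'addr' would silently pick up the macro's
local instead. A contrived sketch of the hazard (hypothetical macro,
not from this patch):

        #define GET(x) ({ unsigned long addr = 0; addr + (x); })

        unsigned long addr = 42;
        unsigned long v = GET(addr);    /* (x) binds to the macro's own
                                         * 'addr', so v == 0, not 42 */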
index c4bd0e2c173c011e37f6eef7acfb9bc3920fcca6..772c784ba76301dbceca9f3c0991c626a893e076 100644 (file)
        .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
                VMLINUX_SYMBOL(__start_rodata) = .;                     \
                *(.rodata) *(.rodata.*)                                 \
+               *(.data..ro_after_init) /* Read only after init */      \
                *(__vermagic)           /* Kernel version magic */      \
                . = ALIGN(8);                                           \
                VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
index 9006c4e75cf737a90335eadcd73e14d59b0f753e..3d8dcdd1aeae902ad2f587e2f2f875a77ef985f7 100644 (file)
@@ -163,4 +163,13 @@ struct amba_device name##_device = {                               \
 #define module_amba_driver(__amba_drv) \
        module_driver(__amba_drv, amba_driver_register, amba_driver_unregister)
 
+/*
+ * builtin_amba_driver() - Helper macro for drivers that don't do anything
+ * special in driver initcall.  This eliminates a lot of boilerplate.  Each
+ * driver may only use this macro once, and calling it replaces the
+ * instance of device_initcall().
+ */
+#define builtin_amba_driver(__amba_drv) \
+       builtin_driver(__amba_drv, amba_driver_register)
+
 #endif
index 17e7e82d2aa758f9888419a9c03aa4059e16b247..1be04f8c563a0c60bdfca72a36c120ec96ef327c 100644 (file)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
 
+/*
+ * __read_mostly is used to keep rarely changing variables out of frequently
+ * updated cachelines. If an architecture doesn't support it, ignore the
+ * hint.
+ */
 #ifndef __read_mostly
 #define __read_mostly
 #endif
 
+/*
+ * __ro_after_init is used to mark things that are read-only after init (i.e.
+ * after mark_rodata_ro() has been called). These are effectively read-only,
+ * but may get written to during init, so can't live in .rodata (via "const").
+ */
+#ifndef __ro_after_init
+#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+#endif
+
 #ifndef ____cacheline_aligned
 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
 #endif
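
For context, a hypothetical use of the new annotation (not from this
patch): the object is writable while __init code runs and becomes
read-only once mark_rodata_ro() has been called, on architectures that
enforce the .data..ro_after_init section:

        static unsigned long limits[4] __ro_after_init;

        static int __init my_setup(void)
        {
                int i;

                /* fine: runs before mark_rodata_ro() */
                for (i = 0; i < 4; i++)
                        limits[i] = i * PAGE_SIZE;

                return 0;
        }

        /* any post-init write to limits[] faults where the
         * section is enforced */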
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
new file mode 100644 (file)
index 0000000..7d41026
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_CORESIGHT_PMU_H
+#define _LINUX_CORESIGHT_PMU_H
+
+#define CORESIGHT_ETM_PMU_NAME "cs_etm"
+#define CORESIGHT_ETM_PMU_SEED  0x10
+
+/* ETMv3.5/PTM's ETMCR config bit */
+#define ETM_OPT_CYCACC  12
+#define ETM_OPT_TS      28
+
+static inline int coresight_get_trace_id(int cpu)
+{
+       /*
+        * A trace ID of value 0 is invalid, so let's start at some
+        * random value that fits in 7 bits and go from there.  Since
+        * the common convention is to have data trace IDs be I(N) + 1,
+        * set instruction trace IDs as a function of the CPU number.
+        */
+       return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
+}
+
+#endif
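
Worked out (following the comment above, with CORESIGHT_ETM_PMU_SEED
of 0x10 and the I(N) + 1 convention for data trace IDs):

        /*
         * CPU0 -> instruction trace ID 0x10, data trace ID 0x11
         * CPU1 -> instruction trace ID 0x12, data trace ID 0x13
         * CPU2 -> instruction trace ID 0x14, data trace ID 0x15
         *
         * hence the seed advances by two per CPU.
         */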
diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h
new file mode 100644 (file)
index 0000000..a978bb8
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __LINUX_CORESIGHT_STM_H_
+#define __LINUX_CORESIGHT_STM_H_
+
+#include <uapi/linux/coresight-stm.h>
+
+#endif
index a7cabfa23b55823773cb91e9bfd7996ae34f3f97..385d62e64abb00218d5f52f32bd1875dde5b7b43 100644 (file)
@@ -14,6 +14,7 @@
 #define _LINUX_CORESIGHT_H
 
 #include <linux/device.h>
+#include <linux/perf_event.h>
 #include <linux/sched.h>
 
 /* Peripheral id registers (0xFD0-0xFEC) */
@@ -152,7 +153,6 @@ struct coresight_connection {
                by @coresight_ops.
  * @dev:       The device entity associated to this component.
  * @refcnt:    keep track of what is in use.
- * @path_link: link of current component into the path being enabled.
  * @orphan:    true if the component has connections that haven't been linked.
  * @enable:    'true' if component is currently part of an active path.
  * @activated: 'true' only if a _sink_ has been activated.  A sink can be
@@ -168,7 +168,6 @@ struct coresight_device {
        const struct coresight_ops *ops;
        struct device dev;
        atomic_t *refcnt;
-       struct list_head path_link;
        bool orphan;
        bool enable;    /* true only if configured as part of a path */
        bool activated; /* true only if a sink is part of a path */
@@ -183,12 +182,29 @@ struct coresight_device {
 /**
  * struct coresight_ops_sink - basic operations for a sink
  * Operations available for sinks
- * @enable:    enables the sink.
- * @disable:   disables the sink.
+ * @enable:            enables the sink.
+ * @disable:           disables the sink.
+ * @alloc_buffer:      initialises perf's ring buffer for trace collection.
+ * @free_buffer:       release memory allocated in @alloc_buffer.
+ * @set_buffer:                initialises buffer mechanics before a trace session.
+ * @reset_buffer:      finalises buffer mechanics after a trace session.
+ * @update_buffer:     update buffer pointers after a trace session.
  */
 struct coresight_ops_sink {
-       int (*enable)(struct coresight_device *csdev);
+       int (*enable)(struct coresight_device *csdev, u32 mode);
        void (*disable)(struct coresight_device *csdev);
+       void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
+                             void **pages, int nr_pages, bool overwrite);
+       void (*free_buffer)(void *config);
+       int (*set_buffer)(struct coresight_device *csdev,
+                         struct perf_output_handle *handle,
+                         void *sink_config);
+       unsigned long (*reset_buffer)(struct coresight_device *csdev,
+                                     struct perf_output_handle *handle,
+                                     void *sink_config, bool *lost);
+       void (*update_buffer)(struct coresight_device *csdev,
+                             struct perf_output_handle *handle,
+                             void *sink_config);
 };
 
 /**
@@ -205,14 +221,18 @@ struct coresight_ops_link {
 /**
  * struct coresight_ops_source - basic operations for a source
  * Operations available for sources.
+ * @cpu_id:    returns the value of the CPU number this component
+ *             is associated to.
  * @trace_id:  returns the value of the component's trace ID as known
  *             to the HW.
  * @enable:    enables tracing for a source.
  * @disable:   disables tracing for a source.
  */
 struct coresight_ops_source {
+       int (*cpu_id)(struct coresight_device *csdev);
        int (*trace_id)(struct coresight_device *csdev);
-       int (*enable)(struct coresight_device *csdev);
+       int (*enable)(struct coresight_device *csdev,
+                     struct perf_event_attr *attr,  u32 mode);
        void (*disable)(struct coresight_device *csdev);
 };
 
index 47be3ad7d3e5bad63b48a8fa344dbea65c0a97dd..333d0ca6940f0214f37b69377716d58852240a3c 100644 (file)
@@ -299,7 +299,7 @@ typedef struct {
        void *open_protocol_information;
        void *protocols_per_handle;
        void *locate_handle_buffer;
-       void *locate_protocol;
+       efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
        void *install_multiple_protocol_interfaces;
        void *uninstall_multiple_protocol_interfaces;
        void *calculate_crc32;
@@ -599,6 +599,10 @@ void efi_native_runtime_setup(void);
 #define EFI_PROPERTIES_TABLE_GUID \
     EFI_GUID(  0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
 
+#define EFI_RNG_PROTOCOL_GUID \
+       EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
+                0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+
 typedef struct {
        efi_guid_t guid;
        u64 table;
index 685c262e0be848ca049ee041d00d389d3cc327fe..b0eb06423d5eccba6cb850078af6ffc60d97c382 100644 (file)
@@ -96,9 +96,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
                                struct address_space *mapping,
                                pgoff_t idx, unsigned long address);
 
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
-#endif
 
 extern int hugepages_treat_as_movable;
 extern int sysctl_hugetlb_shm_group;
index b449f378f995ae647077f521d9f7af3af9480a70..aedb254abc37204a091b790eedfde857dc22155f 100644 (file)
@@ -142,6 +142,10 @@ void prepare_namespace(void);
 void __init load_default_modules(void);
 int __init init_rootfs(void);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
 extern void (*late_time_init)(void);
 
 extern bool initcall_debug;
index 9a2e50337af9fd233656b8fb6f06a91c10b2e7bf..cccaf4a29e9f02c9a60b65f73a523a69efa5af3a 100644 (file)
@@ -34,6 +34,8 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
 
 int dev_pm_opp_get_opp_count(struct device *dev);
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -55,6 +57,14 @@ int dev_pm_opp_enable(struct device *dev, unsigned long freq);
 int dev_pm_opp_disable(struct device *dev, unsigned long freq);
 
 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+                               unsigned int count);
+void dev_pm_opp_put_supported_hw(struct device *dev);
+int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
+void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_set_regulator(struct device *dev, const char *name);
+void dev_pm_opp_put_regulator(struct device *dev);
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
 #else
 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
@@ -81,6 +91,16 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
        return 0;
 }
 
+static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+       return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+       return 0;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 {
        return NULL;
@@ -129,6 +149,35 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
 {
        return ERR_PTR(-EINVAL);
 }
+
+static inline int dev_pm_opp_set_supported_hw(struct device *dev,
+                                             const u32 *versions,
+                                             unsigned int count)
+{
+       return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
+
+static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+       return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
+
+static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+       return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+       return -EINVAL;
+}
+
 #endif         /* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
index 9d0083d364e642b6ac68ae56cdae3bc197ffebde..8369d8a8cabd7f1e43505ac404253974ce101db0 100644 (file)
@@ -50,6 +50,8 @@ struct stm_device;
  * @sw_end:            last STP master available to software
  * @sw_nchannels:      number of STP channels per master
  * @sw_mmiosz:         size of one channel's IO space, for mmap, optional
+ * @hw_override:       masters in the STP stream will not match the ones
+ *                     assigned by software; they are instead assigned by
+ *                     the STM hardware
  * @packet:            callback that sends an STP packet
  * @mmio_addr:         mmap callback, optional
  * @link:              called when a new stm_source gets linked to us, optional
@@ -67,6 +69,16 @@ struct stm_device;
  * description. That is, the lowest master that can be allocated to software
  * writers is @sw_start and data from this writer will appear as @sw_start
  * master in the STP stream.
+ *
+ * The @packet callback should adhere to the following rules:
+ *   1) it must return the number of bytes it consumed from the payload;
+ *   2) therefore, if it sent a packet that does not have payload (like FLAG),
+ *      it must return zero;
+ *   3) if it does not support the requested packet type/flag combination,
+ *      it must return -ENOTSUPP.
+ *
+ * The @unlink callback is called when there are no more active writers so
+ * that the master/channel can be quiesced.
  */
 struct stm_data {
        const char              *name;
@@ -75,6 +87,7 @@ struct stm_data {
        unsigned int            sw_end;
        unsigned int            sw_nchannels;
        unsigned int            sw_mmiosz;
+       unsigned int            hw_override;
        ssize_t                 (*packet)(struct stm_data *, unsigned int,
                                          unsigned int, unsigned int,
                                          unsigned int, unsigned int,
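
A minimal sketch of a @packet callback that follows rules 1-3 above
(the my_hw_*() helpers are hypothetical stand-ins for real hardware
accessors):

        static ssize_t my_stm_packet(struct stm_data *data, unsigned int master,
                                     unsigned int channel, unsigned int packet,
                                     unsigned int flags, unsigned int size,
                                     const unsigned char *payload)
        {
                switch (packet) {
                case STP_PACKET_DATA:
                        my_hw_write(master, channel, payload, size);
                        return size;            /* rule 1: bytes consumed */
                case STP_PACKET_FLAG:
                        my_hw_flag(master, channel);
                        return 0;               /* rule 2: no payload */
                default:
                        return -ENOTSUPP;       /* rule 3: unsupported */
                }
        }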
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h
new file mode 100644 (file)
index 0000000..7e4272c
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef __UAPI_CORESIGHT_STM_H_
+#define __UAPI_CORESIGHT_STM_H_
+
+#define STM_FLAG_TIMESTAMPED   BIT(3)
+#define STM_FLAG_GUARANTEED    BIT(7)
+
+/*
+ * The CoreSight STM supports guaranteed and invariant timing
+ * transactions.  Guaranteed transactions are guaranteed to be
+ * traced; this might involve stalling the bus or system to
+ * ensure the transaction is accepted by the STM.  Invariant
+ * timing transactions, by contrast, are not guaranteed to be
+ * traced, but they take an invariant amount of time regardless
+ * of the state of the STM.
+ */
+enum {
+       STM_OPTION_GUARANTEED = 0,
+       STM_OPTION_INVARIANT,
+};
+
+#endif
index 9e64d7097f1ad4d5744755c977cac583debbaf38..fbafa271531cb417a60bb1366baacc04f2e81880 100644 (file)
@@ -93,9 +93,6 @@ static int kernel_init(void *);
 extern void init_IRQ(void);
 extern void fork_init(void);
 extern void radix_tree_init(void);
-#ifndef CONFIG_DEBUG_RODATA
-static inline void mark_rodata_ro(void) { }
-#endif
 
 /*
  * Debug helper: via this flag we know that we are in 'early bootup code'
@@ -929,6 +926,28 @@ static int try_to_run_init_process(const char *init_filename)
 
 static noinline void __init kernel_init_freeable(void);
 
+#ifdef CONFIG_DEBUG_RODATA
+static bool rodata_enabled = true;
+static int __init set_debug_rodata(char *str)
+{
+       return strtobool(str, &rodata_enabled);
+}
+__setup("rodata=", set_debug_rodata);
+
+static void mark_readonly(void)
+{
+       if (rodata_enabled)
+               mark_rodata_ro();
+       else
+               pr_info("Kernel memory protection disabled.\n");
+}
+#else
+static inline void mark_readonly(void)
+{
+       pr_warn("This architecture does not have kernel memory protection.\n");
+}
+#endif
+
 static int __ref kernel_init(void *unused)
 {
        int ret;
@@ -937,7 +956,7 @@ static int __ref kernel_init(void *unused)
        /* need to finish all async __init code before freeing the memory */
        async_synchronize_full();
        free_initmem();
-       mark_rodata_ro();
+       mark_readonly();
        system_state = SYSTEM_RUNNING;
        numa_default_policy();
 
index e1dbf4a2c69e4ca9721c22184cb9f800325b9194..90ff129c88a27c50e33be234be695650e7210494 100644 (file)
@@ -153,13 +153,11 @@ static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
        } else {
                kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
                           __func__, bp->bp_addr);
-#ifdef CONFIG_DEBUG_RODATA
                if (!bp->bp_type) {
                        kdb_printf("Software breakpoints are unavailable.\n"
-                                  "  Change the kernel CONFIG_DEBUG_RODATA=n\n"
+                                  "  Boot the kernel with rodata=off\n"
                                   "  OR use hw breaks: help bph\n");
                }
-#endif
                return 1;
        }
        return 0;
index 95e47d2f2c67505708513e774e2463c1c9dfb4ff..b76bb498148ea83e7cf27f7f10b9e545be4d6736 100644 (file)
@@ -1885,8 +1885,13 @@ event_sched_in(struct perf_event *event,
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
 
-       event->state = PERF_EVENT_STATE_ACTIVE;
-       event->oncpu = smp_processor_id();
+       WRITE_ONCE(event->oncpu, smp_processor_id());
+       /*
+        * Order event::oncpu write to happen before the ACTIVE state
+        * is visible.
+        */
+       smp_wmb();
+       WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
 
        /*
         * Unthrottle events, since we scheduled we might have missed several
@@ -2367,6 +2372,29 @@ void perf_event_enable(struct perf_event *event)
 }
 EXPORT_SYMBOL_GPL(perf_event_enable);
 
+static int __perf_event_stop(void *info)
+{
+       struct perf_event *event = info;
+
+       /* for AUX events, our job is done if the event is already inactive */
+       if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
+               return 0;
+
+       /* matches smp_wmb() in event_sched_in() */
+       smp_rmb();
+
+       /*
+        * There is a window with interrupts enabled before we get here,
+        * so we need to check again lest we try to stop another CPU's event.
+        */
+       if (READ_ONCE(event->oncpu) != smp_processor_id())
+               return -EAGAIN;
+
+       event->pmu->stop(event, PERF_EF_UPDATE);
+
+       return 0;
+}
+
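
The barriers pair up as a classic publish/observe scheme between
event_sched_in() and __perf_event_stop(). Schematically (a sketch of
the ordering, not new kernel code):

        /*
         * writer (event_sched_in):        reader (__perf_event_stop):
         *
         *   WRITE_ONCE(event->oncpu, c);    s = READ_ONCE(event->state);
         *   smp_wmb();                      if (s != ACTIVE) return 0;
         *   WRITE_ONCE(event->state,        smp_rmb();
         *              ACTIVE);             c = READ_ONCE(event->oncpu);
         *
         * if the reader observes ACTIVE, the wmb/rmb pair guarantees it
         * also observes the oncpu value written before it.
         */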
 static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
        /*
@@ -4616,6 +4644,8 @@ static void perf_mmap_open(struct vm_area_struct *vma)
                event->pmu->event_mapped(event);
 }
 
+static void perf_pmu_output_stop(struct perf_event *event);
+
 /*
  * A buffer can be mmap()ed multiple times; either directly through the same
  * event, or through other events by use of perf_event_set_output().
@@ -4643,10 +4673,22 @@ static void perf_mmap_close(struct vm_area_struct *vma)
         */
        if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
            atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+               /*
+                * Stop all AUX events that are writing to this buffer,
+                * so that we can free its AUX pages and corresponding PMU
+                * data. Note that after rb::aux_mmap_count dropped to zero,
+                * they won't start any more (see perf_aux_output_begin()).
+                */
+               perf_pmu_output_stop(event);
+
+               /* now it's safe to free the pages */
                atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
                vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
 
+               /* this has to be the last one */
                rb_free_aux(rb);
+               WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+
                mutex_unlock(&event->mmap_mutex);
        }
 
@@ -5717,6 +5759,80 @@ next:
        rcu_read_unlock();
 }
 
+struct remote_output {
+       struct ring_buffer      *rb;
+       int                     err;
+};
+
+static void __perf_event_output_stop(struct perf_event *event, void *data)
+{
+       struct perf_event *parent = event->parent;
+       struct remote_output *ro = data;
+       struct ring_buffer *rb = ro->rb;
+
+       if (!has_aux(event))
+               return;
+
+       if (!parent)
+               parent = event;
+
+       /*
+        * In case of inheritance, it will be the parent that links to the
+        * ring-buffer, but it will be the child that's actually using it:
+        */
+       if (rcu_dereference(parent->rb) == rb)
+               ro->err = __perf_event_stop(event);
+}
+
+static int __perf_pmu_output_stop(void *info)
+{
+       struct perf_event *event = info;
+       struct pmu *pmu = event->pmu;
+       struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+       struct remote_output ro = {
+               .rb     = event->rb,
+       };
+
+       rcu_read_lock();
+       perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro);
+       if (cpuctx->task_ctx)
+               perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
+                                  &ro);
+       rcu_read_unlock();
+
+       return ro.err;
+}
+
+static void perf_pmu_output_stop(struct perf_event *event)
+{
+       struct perf_event *iter;
+       int err, cpu;
+
+restart:
+       rcu_read_lock();
+       list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
+               /*
+                * For per-CPU events, we need to make sure that neither they
+                * nor their children are running; for cpu==-1 events it's
+                * sufficient to stop the event itself if it's active, since
+                * it can't have children.
+                */
+               cpu = iter->cpu;
+               if (cpu == -1)
+                       cpu = READ_ONCE(iter->oncpu);
+
+               if (cpu == -1)
+                       continue;
+
+               err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
+               if (err == -EAGAIN) {
+                       rcu_read_unlock();
+                       goto restart;
+               }
+       }
+       rcu_read_unlock();
+}
+
 /*
  * task tracking -- fork/exit
  *
@@ -8461,6 +8577,7 @@ SYSCALL_DEFINE5(perf_event_open,
                                        f_flags);
        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
+               event_file = NULL;
                goto err_context;
        }
 
index 2bbad9c1274c3199338e653bbb5c8bd640815fa7..2b229fdcfc099f608c601e83b43c2fcdb1eba12b 100644 (file)
@@ -11,7 +11,6 @@
 struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
-       struct irq_work                 irq_work;
 #ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
index 014b6952819463e165a8f273839af53424c258a3..084be7d41bcf60dd9ecd7f7709755e6d05fb3f6e 100644 (file)
@@ -221,8 +221,6 @@ void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
-static void rb_irq_work(struct irq_work *work);
-
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -243,16 +241,6 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 
        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);
-       init_irq_work(&rb->irq_work, rb_irq_work);
-}
-
-static void ring_buffer_put_async(struct ring_buffer *rb)
-{
-       if (!atomic_dec_and_test(&rb->refcount))
-               return;
-
-       rb->rcu_head.next = (void *)rb;
-       irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -264,6 +252,10 @@ static void ring_buffer_put_async(struct ring_buffer *rb)
  * The ordering is similar to that of perf_output_{begin,end}, with
  * the exception of (B), which should be taken care of by the pmu
  * driver, since ordering rules will differ depending on hardware.
+ *
+ * Call this from pmu::start(); see the comment in perf_aux_output_end()
+ * about its use in pmu callbacks. Both can also be called from the PMI
+ * handler if needed.
  */
 void *perf_aux_output_begin(struct perf_output_handle *handle,
                            struct perf_event *event)
@@ -287,6 +279,13 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
        if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
                goto err;
 
+       /*
+        * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
+        * the aux buffer is in perf_mmap_close(), about to get freed.
+        */
+       if (!atomic_read(&rb->aux_mmap_count))
+               goto err_put;
+
        /*
         * Nesting is not supported for AUX area, make sure nested
         * writers are caught early
@@ -328,10 +327,11 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
        return handle->rb->aux_priv;
 
 err_put:
+       /* can't be last */
        rb_free_aux(rb);
 
 err:
-       ring_buffer_put_async(rb);
+       ring_buffer_put(rb);
        handle->event = NULL;
 
        return NULL;
@@ -342,6 +342,10 @@ err:
  * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
  * pmu driver's responsibility to observe ordering rules of the hardware,
  * so that all the data is externally visible before this is called.
+ *
+ * Note: this has to be called from pmu::stop() callback, as the assumption
+ * of the AUX buffer management code is that after pmu::stop(), the AUX
+ * transaction must be stopped and therefore drop the AUX reference count.
  */
 void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
                         bool truncated)
@@ -389,8 +393,9 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
        handle->event = NULL;
 
        local_set(&rb->aux_nest, 0);
+       /* can't be last */
        rb_free_aux(rb);
-       ring_buffer_put_async(rb);
+       ring_buffer_put(rb);
 }
 
 /*
@@ -467,6 +472,33 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
        __free_page(page);
 }
 
+static void __rb_free_aux(struct ring_buffer *rb)
+{
+       int pg;
+
+       /*
+        * Should never happen, the last reference should be dropped from
+        * perf_mmap_close() path, which first stops aux transactions (which
+        * in turn are the atomic holders of aux_refcount) and then does the
+        * last rb_free_aux().
+        */
+       WARN_ON_ONCE(in_atomic());
+
+       if (rb->aux_priv) {
+               rb->free_aux(rb->aux_priv);
+               rb->free_aux = NULL;
+               rb->aux_priv = NULL;
+       }
+
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
+
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
+}
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
 {
@@ -555,45 +587,15 @@ out:
        if (!ret)
                rb->aux_pgoff = pgoff;
        else
-               rb_free_aux(rb);
+               __rb_free_aux(rb);
 
        return ret;
 }
 
-static void __rb_free_aux(struct ring_buffer *rb)
-{
-       int pg;
-
-       if (rb->aux_priv) {
-               rb->free_aux(rb->aux_priv);
-               rb->free_aux = NULL;
-               rb->aux_priv = NULL;
-       }
-
-       if (rb->aux_nr_pages) {
-               for (pg = 0; pg < rb->aux_nr_pages; pg++)
-                       rb_free_aux_page(rb, pg);
-
-               kfree(rb->aux_pages);
-               rb->aux_nr_pages = 0;
-       }
-}
-
 void rb_free_aux(struct ring_buffer *rb)
 {
        if (atomic_dec_and_test(&rb->aux_refcount))
-               irq_work_queue(&rb->irq_work);
-}
-
-static void rb_irq_work(struct irq_work *work)
-{
-       struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
-
-       if (!atomic_read(&rb->aux_refcount))
                __rb_free_aux(rb);
-
-       if (rb->rcu_head.next == (void *)rb)
-               call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
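
Taken together, the ring-buffer hunks above drop the irq_work indirection and make the final frees synchronous (ring_buffer_put()/rb_free_aux()), which in turn hardens the driver contract: perf_aux_output_begin() takes the AUX reference from pmu::start(), perf_aux_output_end() drops it from pmu::stop(), and the last reference falls only to the perf_mmap_close() path, never to atomic context. A schematic of how an AUX-capable driver is expected to call the pair; my_pmu_start/my_pmu_stop/my_handle are hypothetical names and the hardware programming is elided:

#include <linux/percpu.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(struct perf_output_handle, my_handle);

static void my_pmu_start(struct perf_event *event, int flags)
{
    struct perf_output_handle *handle = this_cpu_ptr(&my_handle);
    void *buf;

    /* takes an aux_refcount reference; returns NULL if the buffer is
     * gone or (per the new aux_mmap_count check) already on its way
     * through perf_mmap_close() */
    buf = perf_aux_output_begin(handle, event);
    if (!buf)
        return;

    /* ... point the trace hardware at buf ... */
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
    struct perf_output_handle *handle = this_cpu_ptr(&my_handle);
    unsigned long written = 0;

    /* ... stop the hardware, read back how much it wrote ... */

    /* must happen in pmu::stop(): drops the reference taken in
     * my_pmu_start(), so rb_free_aux() never sees the final ref here */
    perf_aux_output_end(handle, written, false);
}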
diff --git a/lib/extable.c b/lib/extable.c
index 4cac81ec225e09af4cfd69c36d63a574a8418110..0be02ad561e9e346c01a65434873f9960472047a 100644
 #include <linux/sort.h>
 #include <asm/uaccess.h>
 
+#ifndef ARCH_HAS_RELATIVE_EXTABLE
+#define ex_to_insn(x)  ((x)->insn)
+#else
+static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
+{
+       return (unsigned long)&x->insn + x->insn;
+}
+#endif
+
 #ifndef ARCH_HAS_SORT_EXTABLE
+#ifndef ARCH_HAS_RELATIVE_EXTABLE
+#define swap_ex                NULL
+#else
+static void swap_ex(void *a, void *b, int size)
+{
+       struct exception_table_entry *x = a, *y = b, tmp;
+       int delta = b - a;
+
+       tmp = *x;
+       x->insn = y->insn + delta;
+       y->insn = tmp.insn - delta;
+
+#ifdef swap_ex_entry_fixup
+       swap_ex_entry_fixup(x, y, tmp, delta);
+#else
+       x->fixup = y->fixup + delta;
+       y->fixup = tmp.fixup - delta;
+#endif
+}
+#endif /* ARCH_HAS_RELATIVE_EXTABLE */
+
 /*
  * The exception table needs to be sorted so that the binary
  * search that we use to find entries in it works properly.
@@ -26,9 +56,9 @@ static int cmp_ex(const void *a, const void *b)
        const struct exception_table_entry *x = a, *y = b;
 
        /* avoid overflow */
-       if (x->insn > y->insn)
+       if (ex_to_insn(x) > ex_to_insn(y))
                return 1;
-       if (x->insn < y->insn)
+       if (ex_to_insn(x) < ex_to_insn(y))
                return -1;
        return 0;
 }
@@ -37,7 +67,7 @@ void sort_extable(struct exception_table_entry *start,
                  struct exception_table_entry *finish)
 {
        sort(start, finish - start, sizeof(struct exception_table_entry),
-            cmp_ex, NULL);
+            cmp_ex, swap_ex);
 }
 
 #ifdef CONFIG_MODULES
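
With ARCH_HAS_RELATIVE_EXTABLE, each extable field stores its target as a 32-bit offset from the field's own address (ex_to_insn() above), so sort()'s default byte-wise swap would corrupt the table: once an entry moves, its offsets are relative to the wrong location. swap_ex() therefore rebiases both offsets by the distance between the two slots. A runnable user-space model of that arithmetic (rel_entry, set() and decode() are illustrative stand-ins, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* model of a relative entry: the field stores target - &field */
struct rel_entry { int32_t insn; };

static uintptr_t decode(const int32_t *field)
{
    return (uintptr_t)field + *field;   /* cf. ex_to_insn() above */
}

static void set(int32_t *field, const void *target)
{
    *field = (int32_t)((uintptr_t)target - (uintptr_t)field);
}

int main(void)
{
    char insn[2];                       /* two fake instruction addresses */
    struct rel_entry tab[2], tmp;
    int delta = (int)((char *)&tab[1] - (char *)&tab[0]);

    set(&tab[0].insn, &insn[1]);        /* out of order on purpose */
    set(&tab[1].insn, &insn[0]);

    /* swap as swap_ex() does: exchange the entries while rebiasing the
     * offsets so each still decodes to the same absolute address */
    tmp = tab[0];
    tab[0].insn = tab[1].insn + delta;
    tab[1].insn = tmp.insn - delta;

    printf("tab[0] -> %p (want %p)\n", (void *)decode(&tab[0].insn), (void *)&insn[0]);
    printf("tab[1] -> %p (want %p)\n", (void *)decode(&tab[1].insn), (void *)&insn[1]);
    return 0;
}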
@@ -48,13 +78,15 @@ void sort_extable(struct exception_table_entry *start,
 void trim_init_extable(struct module *m)
 {
        /*trim the beginning*/
-       while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
+       while (m->num_exentries &&
+              within_module_init(ex_to_insn(&m->extable[0]), m)) {
                m->extable++;
                m->num_exentries--;
        }
        /*trim the end*/
        while (m->num_exentries &&
-               within_module_init(m->extable[m->num_exentries-1].insn, m))
+              within_module_init(ex_to_insn(&m->extable[m->num_exentries - 1]),
+                                 m))
                m->num_exentries--;
 }
 #endif /* CONFIG_MODULES */
@@ -81,13 +113,13 @@ search_extable(const struct exception_table_entry *first,
                 * careful, the distance between value and insn
                 * can be larger than MAX_LONG:
                 */
-               if (mid->insn < value)
+               if (ex_to_insn(mid) < value)
                        first = mid + 1;
-               else if (mid->insn > value)
+               else if (ex_to_insn(mid) > value)
                        last = mid - 1;
                else
                        return mid;
-        }
-        return NULL;
+       }
+       return NULL;
 }
 #endif
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index c2423d913b46bd0e659ea4d4c057a3af6119c2d4..a2c0d620ca80fcca79260a1899e8a8d38c354247 100644
@@ -266,9 +266,9 @@ do_file(char const *const fname)
                break;
        }  /* end switch */
        if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
-       ||  r2(&ehdr->e_type) != ET_EXEC
+       ||  (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN)
        ||  ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
-               fprintf(stderr, "unrecognized ET_EXEC file %s\n", fname);
+               fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
                fail_file();
        }
 
@@ -282,12 +282,13 @@ do_file(char const *const fname)
        case EM_386:
        case EM_X86_64:
        case EM_S390:
+       case EM_AARCH64:
+       case EM_PARISC:
                custom_sort = sort_relative_table;
                break;
        case EM_ARCOMPACT:
        case EM_ARCV2:
        case EM_ARM:
-       case EM_AARCH64:
        case EM_MICROBLAZE:
        case EM_MIPS:
        case EM_XTENSA:
@@ -304,7 +305,7 @@ do_file(char const *const fname)
                if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
                ||  r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
                        fprintf(stderr,
-                               "unrecognized ET_EXEC file: %s\n", fname);
+                               "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
                        fail_file();
                }
                do32(ehdr, fname, custom_sort);
@@ -314,7 +315,7 @@ do_file(char const *const fname)
                if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
                ||  r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
                        fprintf(stderr,
-                               "unrecognized ET_EXEC file: %s\n", fname);
+                               "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
                        fail_file();
                }
                do64(ghdr, fname, custom_sort);
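
The sortextable hunks above accept ET_DYN alongside ET_EXEC because a relocatable vmlinux (e.g. an arm64 KASLR build linked as a PIE) carries ELF type ET_DYN. A minimal sketch of the relaxed type check, assuming the standard elf.h definitions (type_ok() is an illustrative helper, not part of the tool):

#include <elf.h>
#include <stdio.h>

/* illustrative helper: both executable types are now acceptable */
static int type_ok(const Elf64_Ehdr *ehdr)
{
    return ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN;
}

int main(void)
{
    Elf64_Ehdr h = { .e_type = ET_DYN };

    printf("accepted: %d\n", type_ok(&h));
    return 0;
}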
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index 9b94ce5209170fcb6105723aea00bf1214fc4032..4685a40777cc7e2c4806f8af38d57abc077ccbd4 100644
@@ -60,7 +60,9 @@ struct branch {
        u64 misc;
 };
 
-static size_t intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+static size_t
+intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+                        struct perf_evlist *evlist __maybe_unused)
 {
        return INTEL_BTS_AUXTRACE_PRIV_SIZE;
 }
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index b02af064f0f98333b6f90a5d6dd8778e6a7a6bb4..e5c1f2e21f870bf53047c311829a2c653ddbee56 100644
@@ -273,7 +273,9 @@ intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
        return attr;
 }
 
-static size_t intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+static size_t
+intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+                       struct perf_evlist *evlist __maybe_unused)
 {
        return INTEL_PT_AUXTRACE_PRIV_SIZE;
 }
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 99d127fe9c35e500ca74ea871fd4373fcea15d68..ac369c494036e0c7a020679d5d8e3d395aec96db 100644
@@ -626,12 +626,16 @@ static int __cmd_inject(struct perf_inject *inject)
        ret = perf_session__process_events(session);
 
        if (!file_out->is_pipe) {
-               if (inject->build_ids) {
+               if (inject->build_ids)
                        perf_header__set_feat(&session->header,
                                              HEADER_BUILD_ID);
-                       if (inject->have_auxtrace)
-                               dsos__hit_all(session);
-               }
+               /*
+                * Keep all buildids when there is unprocessed AUX data because
+                * it is not known which ones the AUX trace hits.
+                */
+               if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
+                   inject->have_auxtrace && !inject->itrace_synth_opts.set)
+                       dsos__hit_all(session);
                /*
                 * The AUX areas have been removed and replaced with
                 * synthesized hardware events, so clear the feature flag and
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 7f10430af39c3ac9e47f4da1aca93e37a8f9cf87..cc1c9ce5cc5628c4cc99c5b98422d964d976c77e 100644
@@ -478,10 +478,11 @@ void auxtrace_heap__pop(struct auxtrace_heap *heap)
                         heap_array[last].ordinal);
 }
 
-size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
+size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
+                                      struct perf_evlist *evlist)
 {
        if (itr)
-               return itr->info_priv_size(itr);
+               return itr->info_priv_size(itr, evlist);
        return 0;
 }
 
@@ -852,7 +853,7 @@ int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
        int err;
 
        pr_debug2("Synthesizing auxtrace information\n");
-       priv_size = auxtrace_record__info_priv_size(itr);
+       priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
        ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
        if (!ev)
                return -ENOMEM;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index b86f90db1352a6c8635e3ea5d02aa3c21bccc323..e5a8e2d4f2af4e9b717be7ce27c587f569983027 100644
@@ -293,7 +293,8 @@ struct auxtrace_record {
        int (*recording_options)(struct auxtrace_record *itr,
                                 struct perf_evlist *evlist,
                                 struct record_opts *opts);
-       size_t (*info_priv_size)(struct auxtrace_record *itr);
+       size_t (*info_priv_size)(struct auxtrace_record *itr,
+                                struct perf_evlist *evlist);
        int (*info_fill)(struct auxtrace_record *itr,
                         struct perf_session *session,
                         struct auxtrace_info_event *auxtrace_info,
@@ -429,7 +430,8 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
 int auxtrace_record__options(struct auxtrace_record *itr,
                             struct perf_evlist *evlist,
                             struct record_opts *opts);
-size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr);
+size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
+                                      struct perf_evlist *evlist);
 int auxtrace_record__info_fill(struct auxtrace_record *itr,
                               struct perf_session *session,
                               struct auxtrace_info_event *auxtrace_info,
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 10af1e7524fbd24de791c38fa23c7d730d54a193..6523e1a8eea5c7dfd952a5140c52501b4f645dd6 100644
@@ -7,6 +7,10 @@
 #include <stdlib.h>
 #include "asm/bug.h"
 
+static int max_cpu_num;
+static int max_node_num;
+static int *cpunode_map;
+
 static struct cpu_map *cpu_map__default_new(void)
 {
        struct cpu_map *cpus;
@@ -435,6 +439,32 @@ out:
                pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
 }
 
+int cpu__max_node(void)
+{
+       if (unlikely(!max_node_num))
+               set_max_node_num();
+
+       return max_node_num;
+}
+
+int cpu__max_cpu(void)
+{
+       if (unlikely(!max_cpu_num))
+               set_max_cpu_num();
+
+       return max_cpu_num;
+}
+
+int cpu__get_node(int cpu)
+{
+       if (unlikely(cpunode_map == NULL)) {
+               pr_debug("cpu_map not initialized\n");
+               return -1;
+       }
+
+       return cpunode_map[cpu];
+}
+
 static int init_cpunode_map(void)
 {
        int i;
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 85f7772457fa091655d62212067f2edddf6e55ae..d6184ba929b65e9426ef095194d69419818fcea5 100644
@@ -56,37 +56,11 @@ static inline bool cpu_map__empty(const struct cpu_map *map)
        return map ? map->map[0] == -1 : true;
 }
 
-int max_cpu_num;
-int max_node_num;
-int *cpunode_map;
-
 int cpu__setup_cpunode_map(void);
 
-static inline int cpu__max_node(void)
-{
-       if (unlikely(!max_node_num))
-               pr_debug("cpu_map not initialized\n");
-
-       return max_node_num;
-}
-
-static inline int cpu__max_cpu(void)
-{
-       if (unlikely(!max_cpu_num))
-               pr_debug("cpu_map not initialized\n");
-
-       return max_cpu_num;
-}
-
-static inline int cpu__get_node(int cpu)
-{
-       if (unlikely(cpunode_map == NULL)) {
-               pr_debug("cpu_map not initialized\n");
-               return -1;
-       }
-
-       return cpunode_map[cpu];
-}
+int cpu__max_node(void);
+int cpu__max_cpu(void);
+int cpu__get_node(int cpu);
 
 int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
                       int (*f)(struct cpu_map *map, int cpu, void *data),
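
The cpumap hunks above turn accessors that only warned when cpu__setup_cpunode_map() had not run into out-of-line functions that initialize on first use, and make the backing globals static. A user-space sketch of the lazy-init accessor pattern, with the sysfs parsing stubbed out:

#include <stdio.h>

static int max_cpu_num;

/* stand-in for parsing /sys/devices/system/cpu/possible */
static void set_max_cpu_num(void)
{
    max_cpu_num = 8;
}

/* the first caller triggers initialization; later callers reuse it */
static int cpu__max_cpu(void)
{
    if (!max_cpu_num)
        set_max_cpu_num();
    return max_cpu_num;
}

int main(void)
{
    printf("max cpus: %d\n", cpu__max_cpu());
    return 0;
}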
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b4b96120fc3b4336749e97a7ede90725c1c4340e..aa6cf40388f122530c2c10bb3e11c0130cdb17c1 100644
@@ -1486,7 +1486,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
        perf_evlist__update_id_pos(evlist);
 
        evlist__for_each(evlist, evsel) {
-               err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
+               err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
                if (err < 0)
                        goto out_err;
        }
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 397fb4ed3c97b6deffeffd8f69bbceb886ac58ea..1eb4d02e3968ead42d127439b39acb3d825451c7 100644
@@ -988,6 +988,16 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
                                     0);
 }
 
+int perf_evsel__disable(struct perf_evsel *evsel)
+{
+       int nthreads = thread_map__nr(evsel->threads);
+       int ncpus = cpu_map__nr(evsel->cpus);
+
+       return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+                                    PERF_EVENT_IOC_DISABLE,
+                                    0);
+}
+
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
        if (ncpus == 0 || nthreads == 0)
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 0e49bd742c639c02d1aef18f421c0204faa95823..eaa4c733c97618fe639ca0ec38bac746840058d8 100644
@@ -228,6 +228,7 @@ int perf_evsel__append_filter(struct perf_evsel *evsel,
 int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
                             const char *filter);
 int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__disable(struct perf_evsel *evsel);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus);
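
perf_evsel__disable() added above mirrors perf_evsel__enable(): one PERF_EVENT_IOC_DISABLE ioctl per open perf fd across the event's cpus and threads. A sketch of the underlying loop (disable_fds() is an illustrative helper operating on fds already obtained from perf_event_open()):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* illustrative helper: stop counting on every fd of one event */
static int disable_fds(const int *fds, int nfds)
{
    int i;

    for (i = 0; i < nfds; i++)
        if (ioctl(fds[i], PERF_EVENT_IOC_DISABLE, 0) < 0)
            return -1;  /* caller inspects errno */
    return 0;
}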
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 468de95bc8bb1a73a666fc6eb92275d0ec1cd4f9..010ff659b82fa667c70fffa85fd8e654c7d078d4 100644
@@ -224,14 +224,6 @@ static int process_event_stub(struct perf_tool *tool __maybe_unused,
        return 0;
 }
 
-static int process_build_id_stub(struct perf_tool *tool __maybe_unused,
-                                union perf_event *event __maybe_unused,
-                                struct perf_session *session __maybe_unused)
-{
-       dump_printf(": unhandled!\n");
-       return 0;
-}
-
 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
@@ -244,23 +236,6 @@ static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);
 
-static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
-                                union perf_event *event __maybe_unused,
-                                struct perf_session *perf_session
-                                __maybe_unused)
-{
-       dump_printf(": unhandled!\n");
-       return 0;
-}
-
-static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused,
-                               union perf_event *event __maybe_unused,
-                               struct perf_session *session __maybe_unused)
-{
-       dump_printf(": unhandled!\n");
-       return 0;
-}
-
 static int skipn(int fd, off_t n)
 {
        char buf[4096];
@@ -287,10 +262,9 @@ static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
        return event->auxtrace.size;
 }
 
-static
-int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
-                                     union perf_event *event __maybe_unused,
-                                     struct perf_session *session __maybe_unused)
+static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
+                                 union perf_event *event __maybe_unused,
+                                 struct perf_session *session __maybe_unused)
 {
        dump_printf(": unhandled!\n");
        return 0;
@@ -331,7 +305,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
-               tool->build_id = process_build_id_stub;
+               tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
@@ -339,13 +313,13 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
-               tool->id_index = process_id_index_stub;
+               tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
-               tool->auxtrace_info = process_event_auxtrace_info_stub;
+               tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
-               tool->auxtrace_error = process_event_auxtrace_error_stub;
+               tool->auxtrace_error = process_event_op2_stub;
 }
 
 static void swap_sample_id_all(union perf_event *event, void *data)
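
The session.c cleanup above folds several identical per-callback stubs into a single process_event_op2_stub() wired up by perf_tool__fill_defaults(). The fill-defaults pattern itself, reduced to a runnable sketch (struct tool and op2_stub here are simplified stand-ins, not the perf definitions):

#include <stdio.h>

struct tool {
    int (*build_id)(void *event);
    int (*id_index)(void *event);
    int (*auxtrace_info)(void *event);
};

/* one shared stub replaces several identical per-callback stubs,
 * mirroring the consolidation in the hunks above */
static int op2_stub(void *event)
{
    (void)event;
    printf(": unhandled!\n");
    return 0;
}

/* any handler left NULL is pointed at the shared stub */
static void fill_defaults(struct tool *t)
{
    if (!t->build_id)
        t->build_id = op2_stub;
    if (!t->id_index)
        t->id_index = op2_stub;
    if (!t->auxtrace_info)
        t->auxtrace_info = op2_stub;
}

int main(void)
{
    struct tool t = { 0 };

    fill_defaults(&t);
    return t.build_id(NULL);
}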