subsystem. It also supports calculating CPU cycle events
to determine how many clock cycles elapse in a given period.
+config HAVE_PERF_REGS
+ bool
+ help
+ Support selective register dumps for perf events. This includes
+ a bit-mapping of each register and a unique architecture id.
+
+config HAVE_PERF_USER_STACK_DUMP
+ bool
+ help
+ Support user stack dumps for perf event samples. This needs
+ access to the user stack pointer which is not unified across
+ architectures.
+
config HAVE_ARCH_JUMP_LABEL
bool
config GENERIC_KERNEL_THREAD
bool
+config GENERIC_KERNEL_EXECVE
+ bool
+
config HAVE_ARCH_SECCOMP_FILTER
bool
help
See Documentation/prctl/seccomp_filter.txt for details.
+config HAVE_RCU_USER_QS
+ bool
+ help
+ Provide kernel entry/exit hooks necessary for userspace
+ RCU extended quiescent state. Syscalls need to be wrapped inside
+ rcu_user_exit()-rcu_user_enter() through the slow path using the
+ TIF_NOHZ flag. Exception handlers must be wrapped as well. Irqs
+ are already protected inside rcu_irq_enter/rcu_irq_exit() but
+ preemption or signal handling on irq exit still needs to be protected.
+
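The hooks this option describes are illustrated by the sketch below; it is not code from this patch, it assumes the architecture defines TIF_NOHZ, and the helper names arch_enter_from_user()/arch_return_to_user() are purely illustrative stand-ins for an arch's existing slow-path syscall/exception entry and exit code:

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* On entry to the kernel, leave the RCU extended quiescent state. */
static inline void arch_enter_from_user(void)
{
	if (test_thread_flag(TIF_NOHZ))
		rcu_user_exit();
}

/* Just before returning to userspace, re-enter the quiescent state. */
static inline void arch_return_to_user(void)
{
	if (test_thread_flag(TIF_NOHZ))
		rcu_user_enter();
}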
+config HAVE_VIRT_CPU_ACCOUNTING
+ bool
+
+config HAVE_IRQ_TIME_ACCOUNTING
+ bool
+ help
+ Archs need to ensure they use a high enough resolution clock to
+ support irq time accounting and then call enable_sched_clock_irqtime().
+
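A minimal sketch of what an architecture selecting this option is expected to do at boot (assumed, not taken from this patch; my_arch_timer_init() is an illustrative name): once a sufficiently fine-grained sched_clock source is in place, call enable_sched_clock_irqtime() so the scheduler starts attributing time to hard/soft IRQ contexts:

#include <linux/init.h>
#include <linux/sched.h>	/* enable_sched_clock_irqtime() */

void __init my_arch_timer_init(void)
{
	/* ... register a clocksource/sched_clock with sub-jiffy resolution ... */

	/* Opt in to fine-grained IRQ time accounting. */
	enable_sched_clock_irqtime();
}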
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ bool
+
source "kernel/gcov/Kconfig"
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
+ select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_DEBUG_KMEMLEAK
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select HAVE_UID16
select ARCH_WANT_IPC_PARSE_VERSION
select HARDIRQS_SW_RESEND
select CPU_PM if (SUSPEND || CPU_IDLE)
select GENERIC_STRNLEN_USER
select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
this feature (eg, building a kernel for a single machine) and
you need to shrink the kernel to the minimal size.
+config NEED_MACH_GPIO_H
+ bool
+ help
+ Select this when mach/gpio.h is required to provide special
+ definitions for this platform. The need for mach/gpio.h should
+ be avoided when possible.
+
config NEED_MACH_IO_H
bool
help
#
choice
prompt "ARM system type"
- default ARCH_VERSATILE
+ default ARCH_MULTIPLATFORM
-config ARCH_SOCFPGA
- bool "Altera SOCFPGA family"
- select ARCH_WANT_OPTIONAL_GPIOLIB
- select ARM_AMBA
- select ARM_GIC
- select CACHE_L2X0
- select CLKDEV_LOOKUP
+config ARCH_MULTIPLATFORM
+ bool "Allow multiple platforms to be selected"
+ select ARM_PATCH_PHYS_VIRT
+ select AUTO_ZRELADDR
select COMMON_CLK
- select CPU_V7
- select DW_APB_TIMER
- select DW_APB_TIMER_OF
- select GENERIC_CLOCKEVENTS
- select GPIO_PL061 if GPIOLIB
- select HAVE_ARM_SCU
+ select MULTI_IRQ_HANDLER
select SPARSE_IRQ
select USE_OF
- help
- This enables support for Altera SOCFPGA Cyclone V platform
+ depends on MMU
config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
select ARCH_HAS_CPUFREQ
select COMMON_CLK
- select CLK_VERSATILE
+ select COMMON_CLK_VERSATILE
select HAVE_TCM
select ICST
select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE
select PLAT_VERSATILE_FPGA_IRQ
- select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
select MULTI_IRQ_HANDLER
config ARCH_REALVIEW
bool "ARM Ltd. RealView family"
select ARM_AMBA
- select CLKDEV_LOOKUP
- select HAVE_MACH_CLKDEV
+ select COMMON_CLK
+ select COMMON_CLK_VERSATILE
select ICST
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
select PLAT_VERSATILE
- select PLAT_VERSATILE_CLOCK
select PLAT_VERSATILE_CLCD
select ARM_TIMER_SP804
select GPIO_PL061 if GPIOLIB
select ICST
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
- select NEED_MACH_IO_H if PCI
select PLAT_VERSATILE
select PLAT_VERSATILE_CLOCK
select PLAT_VERSATILE_CLCD
help
This enables support for ARM Ltd Versatile board.
-config ARCH_VEXPRESS
- bool "ARM Ltd. Versatile Express family"
- select ARCH_WANT_OPTIONAL_GPIOLIB
- select ARM_AMBA
- select ARM_TIMER_SP804
- select CLKDEV_LOOKUP
- select COMMON_CLK
- select GENERIC_CLOCKEVENTS
- select HAVE_CLK
- select HAVE_PATA_PLATFORM
- select ICST
- select NO_IOPORT
- select PLAT_VERSATILE
- select PLAT_VERSATILE_CLCD
- select REGULATOR_FIXED_VOLTAGE if REGULATOR
- help
- This enables support for the ARM Ltd Versatile Express boards.
-
config ARCH_AT91
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
select CLKDEV_LOOKUP
select IRQ_DOMAIN
+ select NEED_MACH_GPIO_H
select NEED_MACH_IO_H if PCCARD
help
This enables support for systems based on Atmel
AT91RM9200 and AT91SAM9* processors.
-config ARCH_BCMRING
- bool "Broadcom BCMRING"
- depends on MMU
- select CPU_V6
- select ARM_AMBA
- select ARM_TIMER_SP804
- select CLKDEV_LOOKUP
- select GENERIC_CLOCKEVENTS
- select ARCH_WANT_OPTIONAL_GPIOLIB
- help
- Support for Broadcom's BCMRing platform.
-
-config ARCH_HIGHBANK
- bool "Calxeda Highbank-based"
+config ARCH_BCM2835
+ bool "Broadcom BCM2835 family"
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
- select ARM_GIC
+ select ARM_ERRATA_411920
select ARM_TIMER_SP804
- select CACHE_L2X0
select CLKDEV_LOOKUP
select COMMON_CLK
- select CPU_V7
+ select CPU_V6
select GENERIC_CLOCKEVENTS
- select HAVE_ARM_SCU
- select HAVE_SMP
+ select MULTI_IRQ_HANDLER
select SPARSE_IRQ
select USE_OF
help
- Support for the Calxeda Highbank SoC based boards.
+ This enables support for the Broadcom BCM2835 SoC. This SoC is
+ used in the Raspberry Pi and Roku 2 devices.
config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
select CPU_ARM720T
select ARCH_USES_GETTIMEOFFSET
+ select COMMON_CLK
+ select CLKDEV_LOOKUP
select NEED_MACH_MEMORY_H
help
Support for Cirrus Logic 711x/721x/731x based boards.
help
Support for the Cortina Systems Gemini family SoCs
-config ARCH_PRIMA2
- bool "CSR SiRFSoC PRIMA2 ARM Cortex A9 Platform"
- select CPU_V7
+config ARCH_SIRF
+ bool "CSR SiRF"
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
- select CLKDEV_LOOKUP
+ select COMMON_CLK
select GENERIC_IRQ_CHIP
select MIGHT_HAVE_CACHE_L2X0
select PINCTRL
select PINCTRL_SIRF
select USE_OF
- select ZONE_DMA
help
- Support for CSR SiRFSoC ARM Cortex A9 Platform
+ Support for CSR SiRFprimaII/Marco/Polo platforms
config ARCH_EBSA110
bool "EBSA-110"
select FOOTBRIDGE
select GENERIC_CLOCKEVENTS
select HAVE_IDE
- select NEED_MACH_IO_H
+ select NEED_MACH_IO_H if !MMU
select NEED_MACH_MEMORY_H
help
Support for systems based on the DC21285 companion chip
select CLKSRC_MMIO
select COMMON_CLK
select HAVE_CLK_PREPARE
+ select MULTI_IRQ_HANDLER
select PINCTRL
+ select SPARSE_IRQ
select USE_OF
help
Support for Freescale MXS-based family of processors
select PCI
select ARCH_SUPPORTS_MSI
select VMSPLIT_1G
- select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
select NEED_RET_TO_USER
help
bool "IOP32x-based"
depends on MMU
select CPU_XSCALE
- select NEED_MACH_IO_H
+ select NEED_MACH_GPIO_H
select NEED_RET_TO_USER
select PLAT_IOP
select PCI
bool "IOP33x-based"
depends on MMU
select CPU_XSCALE
- select NEED_MACH_IO_H
+ select NEED_MACH_GPIO_H
select NEED_RET_TO_USER
select PLAT_IOP
select PCI
help
Support for Intel's IXP4XX (XScale) family of processors.
-config ARCH_MVEBU
- bool "Marvell SOCs with Device Tree support"
- select GENERIC_CLOCKEVENTS
- select MULTI_IRQ_HANDLER
- select SPARSE_IRQ
- select CLKSRC_MMIO
- select GENERIC_IRQ_CHIP
- select IRQ_DOMAIN
- select COMMON_CLK
- help
- Support for the Marvell SoC Family with device tree support
-
config ARCH_DOVE
bool "Marvell Dove"
select CPU_V7
- select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
- select NEED_MACH_IO_H
- select PLAT_ORION
+ select MIGHT_HAVE_PCI
+ select PLAT_ORION_LEGACY
+ select USB_ARCH_HAS_EHCI
help
Support for the Marvell Dove SoC 88AP510
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
- select NEED_MACH_IO_H
- select PLAT_ORION
+ select PLAT_ORION_LEGACY
help
Support for the following Marvell Kirkwood series SoCs:
88F6180, 88F6192 and 88F6281.
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
- select NEED_MACH_IO_H
- select PLAT_ORION
+ select PLAT_ORION_LEGACY
help
Support for the following Marvell MV78xx0 series SoCs:
MV781x0, MV782x0.
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
- select NEED_MACH_IO_H
- select PLAT_ORION
+ select PLAT_ORION_LEGACY
help
Support for the following Marvell Orion 5x series SoCs:
Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182),
select PLAT_PXA
select SPARSE_IRQ
select GENERIC_ALLOCATOR
+ select NEED_MACH_GPIO_H
help
Support for Marvell's PXA168/PXA910(MMP) and MMP2 processor line.
bool "Micrel/Kendin KS8695"
select CPU_ARM922T
select ARCH_REQUIRE_GPIOLIB
- select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
+ select CLKSRC_MMIO
+ select GENERIC_CLOCKEVENTS
help
Support for Micrel/Kendin KS8695 "Centaur" (ARM922T) based
System-on-Chip devices.
select HAVE_CLK
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
- select NEED_MACH_IO_H if PCI
select ARCH_HAS_CPUFREQ
select USE_OF
+ select COMMON_CLK
help
This enables support for NVIDIA Tegra based systems (Tegra APX,
Tegra 6xx and Tegra 2 series).
-config ARCH_PICOXCELL
- bool "Picochip picoXcell"
- select ARCH_REQUIRE_GPIOLIB
- select ARM_PATCH_PHYS_VIRT
- select ARM_VIC
- select CPU_V6K
- select DW_APB_TIMER
- select DW_APB_TIMER_OF
- select GENERIC_CLOCKEVENTS
- select GENERIC_GPIO
- select HAVE_TCM
- select NO_IOPORT
- select SPARSE_IRQ
- select USE_OF
- help
- This enables support for systems based on the Picochip picoXcell
- family of Femtocell devices. The picoxcell support requires device tree
- for all boards.
-
-config ARCH_PNX4008
- bool "Philips Nexperia PNX4008 Mobile"
- select CPU_ARM926T
- select CLKDEV_LOOKUP
- select ARCH_USES_GETTIMEOFFSET
- help
- This enables support for Philips PNX4008 mobile platform.
-
config ARCH_PXA
bool "PXA2xx/PXA3xx-based"
depends on MMU
select MULTI_IRQ_HANDLER
select ARM_CPU_SUSPEND if PM
select HAVE_IDE
+ select NEED_MACH_GPIO_H
help
Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
select HAVE_IDE
+ select NEED_MACH_GPIO_H
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
help
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ select NEED_MACH_GPIO_H
select NEED_MACH_IO_H
help
Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443
select SAMSUNG_GPIOLIB_4BIT
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ select NEED_MACH_GPIO_H
help
Samsung S3C64XX series based systems
select GENERIC_CLOCKEVENTS
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS
+ select NEED_MACH_GPIO_H
help
Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440,
SMDK6450.
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ select NEED_MACH_GPIO_H
help
Samsung S5PC100 series based systems
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ select NEED_MACH_GPIO_H
select NEED_MACH_MEMORY_H
help
Samsung S5PV210/S5PC110 series based systems
select HAVE_S3C_RTC if RTC_CLASS
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ select NEED_MACH_GPIO_H
select NEED_MACH_MEMORY_H
help
Support for SAMSUNG's EXYNOS SoCs (EXYNOS4/5)
select PCI
select ARCH_USES_GETTIMEOFFSET
select NEED_MACH_MEMORY_H
- select NEED_MACH_IO_H
help
Support for the StrongARM based Digital DNARD machine, also known
as "Shark" (<http://www.shark-linux.de/shark.html>).
select COMMON_CLK
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
+ select SPARSE_IRQ
help
Support for ST-Ericsson U300 series mobile platforms.
select COMMON_CLK
select GENERIC_CLOCKEVENTS
select PINCTRL
+ select PINCTRL_STN8815
select MIGHT_HAVE_CACHE_L2X0
select ARCH_REQUIRE_GPIOLIB
help
select GENERIC_ALLOCATOR
select GENERIC_IRQ_CHIP
select ARCH_HAS_HOLES_MEMORYMODEL
+ select NEED_MACH_GPIO_H
help
Support for TI's DaVinci platform.
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select ARCH_HAS_HOLES_MEMORYMODEL
+ select NEED_MACH_GPIO_H
help
Support for TI's OMAP platform (OMAP1/2/3/4).
select ARCH_HAS_CPUFREQ
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
+ select USE_OF
+ select COMMON_CLK
+ select HAVE_CLK
+ select CLKDEV_LOOKUP
help
Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
Support for Xilinx Zynq ARM Cortex A9 Platform
endchoice
+menu "Multiple platform selection"
+ depends on ARCH_MULTIPLATFORM
+
+comment "CPU Core family selection"
+
+config ARCH_MULTI_V4
+ bool "ARMv4 based platforms (FA526, StrongARM)"
+ select ARCH_MULTI_V4_V5
+ depends on !ARCH_MULTI_V6_V7
+
+config ARCH_MULTI_V4T
+ bool "ARMv4T based platforms (ARM720T, ARM920T, ...)"
+ select ARCH_MULTI_V4_V5
+ depends on !ARCH_MULTI_V6_V7
+
+config ARCH_MULTI_V5
+ bool "ARMv5 based platforms (ARM926T, XSCALE, PJ1, ...)"
+ select ARCH_MULTI_V4_V5
+ depends on !ARCH_MULTI_V6_V7
+
+config ARCH_MULTI_V4_V5
+ bool
+
+config ARCH_MULTI_V6
+ bool "ARMv6 based platforms (ARM11, Scorpion, ...)"
+ select CPU_V6
+ select ARCH_MULTI_V6_V7
+
+config ARCH_MULTI_V7
+ bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)"
+ select CPU_V7
+ select ARCH_VEXPRESS
+ default y
+ select ARCH_MULTI_V6_V7
+
+config ARCH_MULTI_V6_V7
+ bool
+
+config ARCH_MULTI_CPU_AUTO
+ def_bool !(ARCH_MULTI_V4 || ARCH_MULTI_V4T || ARCH_MULTI_V6_V7)
+ select ARCH_MULTI_V5
+
+endmenu
+
#
# This is sorted alphabetically by mach-* pathname. However, plat-*
# Kconfigs may be included either alphabetically (according to the
source "arch/arm/mach-at91/Kconfig"
-source "arch/arm/mach-bcmring/Kconfig"
-
source "arch/arm/mach-clps711x/Kconfig"
source "arch/arm/mach-cns3xxx/Kconfig"
source "arch/arm/mach-h720x/Kconfig"
+source "arch/arm/mach-highbank/Kconfig"
+
source "arch/arm/mach-integrator/Kconfig"
source "arch/arm/mach-iop32x/Kconfig"
source "arch/arm/mach-orion5x/Kconfig"
+source "arch/arm/mach-picoxcell/Kconfig"
+
source "arch/arm/mach-pxa/Kconfig"
source "arch/arm/plat-pxa/Kconfig"
source "arch/arm/plat-samsung/Kconfig"
source "arch/arm/plat-s3c24xx/Kconfig"
+source "arch/arm/mach-socfpga/Kconfig"
+
source "arch/arm/plat-spear/Kconfig"
source "arch/arm/mach-s3c24xx/Kconfig"
source "arch/arm/mach-shmobile/Kconfig"
+source "arch/arm/mach-prima2/Kconfig"
+
source "arch/arm/mach-tegra/Kconfig"
source "arch/arm/mach-u300/Kconfig"
source "arch/arm/mach-vexpress/Kconfig"
source "arch/arm/plat-versatile/Kconfig"
-source "arch/arm/mach-vt8500/Kconfig"
-
source "arch/arm/mach-w90x900/Kconfig"
# Definitions to make life easier
select IRQ_DOMAIN
select COMMON_CLK
+config PLAT_ORION_LEGACY
+ bool
+ select PLAT_ORION
+
config PLAT_PXA
bool
depends on CPU_XSCALE
default y
-config CPU_HAS_PMU
- depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
- (!ARCH_OMAP3 || OMAP3_EMU)
- default y
- bool
-
config MULTI_IRQ_HANDLER
bool
help
on systems with an outer cache, the store buffer is drained
explicitly.
+config ARM_ERRATA_775420
+ bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 775420 Cortex-A9 (r2p2,
+ r2p6, r2p8, r2p10, r3p0) erratum. In case a data cache maintenance
+ operation aborts with an MMU exception, it might cause the processor
+ to deadlock. This workaround puts a DSB before executing an ISB if
+ an abort may occur on cache maintenance.
+
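Conceptually (this is an illustration, not the code touched by the workaround, and the function name is made up), the fix amounts to draining any pending data cache maintenance with a DSB before the ISB that follows it:

static inline void cache_op_barrier_775420(void)
{
	asm volatile("dsb" ::: "memory");	/* let outstanding cache maintenance complete */
	asm volatile("isb" ::: "memory");	/* then synchronize the instruction stream */
}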
endmenu
source "arch/arm/common/Kconfig"
default 355 if ARCH_U8500
default 264 if MACH_H4700
default 512 if SOC_OMAP5
+ default 288 if ARCH_VT8500
default 0
help
Maximum number of GPIOs in the system.
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && CPU_HAS_PMU
+ depends on PERF_EVENTS
default y
help
Enable hardware performance counter support for perf events. If
config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ARCH_SHMOBILE
range 11 64 if ARCH_SHMOBILE
+ default "12" if SOC_AM33XX
default "9" if SA1111
default "11"
help
This config option is actually maximum order plus one. For example,
a value of 11 means that the largest free memory block is 2^10 pages.
-config LEDS
- bool "Timer and CPU usage LEDs"
- depends on ARCH_CDB89712 || ARCH_EBSA110 || \
- ARCH_EBSA285 || ARCH_INTEGRATOR || \
- ARCH_LUBBOCK || MACH_MAINSTONE || ARCH_NETWINDER || \
- ARCH_OMAP || ARCH_P720T || ARCH_PXA_IDP || \
- ARCH_SA1100 || ARCH_SHARK || ARCH_VERSATILE || \
- ARCH_AT91 || ARCH_DAVINCI || \
- ARCH_KS8695 || MACH_RD88F5182 || ARCH_REALVIEW
- help
- If you say Y here, the LEDs on your machine will be used
- to provide useful information about your current system status.
-
- If you are compiling a kernel for a NetWinder or EBSA-285, you will
- be able to select which LEDs are active using the options below. If
- you are compiling a kernel for the EBSA-110 or the LART however, the
- red LED will simply flash regularly to indicate that the system is
- still functional. It is safe to say Y here if you have a CATS
- system, but the driver will do nothing.
-
-config LEDS_TIMER
- bool "Timer LED" if (!ARCH_CDB89712 && !ARCH_OMAP) || \
- OMAP_OSK_MISTRAL || MACH_OMAP_H2 \
- || MACH_OMAP_PERSEUS2
- depends on LEDS
- depends on !GENERIC_CLOCKEVENTS
- default y if ARCH_EBSA110
- help
- If you say Y here, one of the system LEDs (the green one on the
- NetWinder, the amber one on the EBSA285, or the red one on the LART)
- will flash regularly to indicate that the system is still
- operational. This is mainly useful to kernel hackers who are
- debugging unstable kernels.
-
- The LART uses the same LED for both Timer LED and CPU usage LED
- functions. You may choose to use both, but the Timer LED function
- will overrule the CPU usage LED.
-
-config LEDS_CPU
- bool "CPU usage LED" if (!ARCH_CDB89712 && !ARCH_EBSA110 && \
- !ARCH_OMAP) \
- || OMAP_OSK_MISTRAL || MACH_OMAP_H2 \
- || MACH_OMAP_PERSEUS2
- depends on LEDS
- help
- If you say Y here, the red LED will be used to give a good real
- time indication of CPU usage, by lighting whenever the idle task
- is not currently executing.
-
- The LART uses the same LED for both Timer LED and CPU usage LED
- functions. You may choose to use both, but the Timer LED function
- will overrule the CPU usage LED.
-
config ALIGNMENT_TRAP
bool
depends on CPU_CP15_MMU
configuration it is safe to say N, otherwise say Y.
config UACCESS_WITH_MEMCPY
- bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user() (EXPERIMENTAL)"
- depends on MMU && EXPERIMENTAL
+ bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
+ depends on MMU
default y if CPU_FEROCEON
help
Implement faster copy_to_user and clear_user methods for CPU
neutralized via a kernel panic.
This feature requires gcc version 4.2 or above.
-config DEPRECATED_PARAM_STRUCT
- bool "Provide old way to pass kernel parameters"
+config XEN_DOM0
+ def_bool y
+ depends on XEN
+
+config XEN
+ bool "Xen guest support on ARM (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && ARM && OF
help
- This was deprecated in 2001 and announced to live on for 5 years.
- Some old boot loaders still use this way.
+ Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
endmenu
help
Include support for flattened device tree machine descriptions.
+config ATAGS
+ bool "Support for the traditional ATAGS boot data passing" if USE_OF
+ default y
+ help
+ This is the traditional way of passing data to the kernel at boot
+ time. If you are solely relying on the flattened device tree (or
+ the ARM_ATAG_DTB_COMPAT option) then you may unselect this option
+ to remove ATAGS support from your kernel binary. If unsure,
+ leave this set to y.
+
+config DEPRECATED_PARAM_STRUCT
+ bool "Provide old way to pass kernel parameters"
+ depends on ATAGS
+ help
+ This was deprecated in 2001 and announced to live on for 5 years.
+ Some old boot loaders still use this method.
+
# Compressed boot loader in ROM. Yes, we really want to ask about
# TEXT and BSS so we preserve their values in the config files.
config ZBOOT_ROM_TEXT
choice
prompt "Kernel command line type" if CMDLINE != ""
default CMDLINE_FROM_BOOTLOADER
+ depends on ATAGS
config CMDLINE_FROM_BOOTLOADER
bool "Use bootloader kernel arguments if available"
config XIP_KERNEL
bool "Kernel Execute-In-Place from ROM"
- depends on !ZBOOT_ROM && !ARM_LPAE
+ depends on !ZBOOT_ROM && !ARM_LPAE && !ARCH_MULTIPLATFORM
help
Execute-In-Place allows the kernel to run from non-volatile storage
directly addressable by the CPU, such as NOR flash. This saves RAM
config ATAGS_PROC
bool "Export atags in procfs"
- depends on KEXEC
+ depends on ATAGS && KEXEC
default y
help
Should the atags used to boot the kernel be exported in an "atags"
source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
- depends on !ARCH_S5PC100 && !ARCH_TEGRA
+ depends on !ARCH_S5PC100
depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
def_bool y
#define __NR_setns (__NR_SYSCALL_BASE+375)
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
+ /* 378 for kcmp */
+
+/*
+ * This may need to be greater than __NR_last_syscall+1 in order to
+ * account for the padding in the syscall table
+ */
+#ifdef __KERNEL__
+#define __NR_syscalls (380)
+#endif /* __KERNEL__ */
/*
* The following SWIs are ARM private.
#define __ARCH_WANT_SYS_SOCKETCALL
#endif
#define __ARCH_WANT_SYS_EXECVE
- #define __ARCH_WANT_KERNEL_EXECVE
/*
* "Conditional" syscalls
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
+#define __IGNORE_kcmp
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */
*/
ENTRY(ret_from_fork)
bl schedule_tail
+ cmp r5, #0
+ movne r0, r4
+ movne lr, pc
+ movne pc, r5
get_thread_info tsk
- mov why, #1
b ret_slow_syscall
ENDPROC(ret_from_fork)
- ENTRY(ret_from_kernel_thread)
- UNWIND(.fnstart)
- UNWIND(.cantunwind)
- bl schedule_tail
- mov r0, r4
- adr lr, BSYM(1f) @ kernel threads should not exit
- mov pc, r5
- 1: bl do_exit
- nop
- UNWIND(.fnend)
- ENDPROC(ret_from_kernel_thread)
-
- /*
- * turn a kernel thread into userland process
- * use: ret_from_kernel_execve(struct pt_regs *normal)
- */
- ENTRY(ret_from_kernel_execve)
- mov why, #0 @ not a syscall
- str why, [r0, #S_R0] @ ... and we want 0 in ->ARM_r0 as well
- get_thread_info tsk @ thread structure
- mov sp, r0 @ stack pointer just under pt_regs
- b ret_slow_syscall
- ENDPROC(ret_from_kernel_execve)
-
.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
+
+/*
+ * Ensure that the system call table is equal to __NR_syscalls,
+ * which is the value the rest of the system sees
+ */
+.ifne NR_syscalls - __NR_syscalls
+.error "__NR_syscalls is not equal to the size of the syscall table"
+.endif
+
#undef CALL
#define CALL(x) .long x
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
+#include <linux/leds.h>
#include <asm/cacheflush.h>
-#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
while (1) {
tick_nohz_idle_enter();
rcu_idle_enter();
- leds_event(led_idle_start);
+ ledtrig_cpu(CPU_LED_IDLE_START);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
} else
local_irq_enable();
}
- leds_event(led_idle_end);
+ ledtrig_cpu(CPU_LED_IDLE_END);
rcu_idle_exit();
tick_nohz_idle_exit();
schedule_preempt_disabled();
}
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
- asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
*childregs = *regs;
childregs->ARM_r0 = 0;
childregs->ARM_sp = stack_start;
- thread->cpu_context.pc = (unsigned long)ret_from_fork;
} else {
+ memset(childregs, 0, sizeof(struct pt_regs));
thread->cpu_context.r4 = stk_sz;
thread->cpu_context.r5 = stack_start;
- thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
childregs->ARM_cpsr = SVC_MODE;
}
+ thread->cpu_context.pc = (unsigned long)ret_from_fork;
thread->cpu_context.sp = (unsigned long)childregs;
clear_ptrace_hw_breakpoint(p);
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+ #include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
{
unsigned long flags;
struct eeh_event *event;
- struct eeh_dev *edev;
+ struct eeh_pe *pe;
- set_task_comm(current, "eehd");
-
spin_lock_irqsave(&eeh_eventlist_lock, flags);
event = NULL;
/* Serialize processing of EEH events */
mutex_lock(&eeh_event_mutex);
- edev = event->edev;
- eeh_mark_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
-
- printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
- eeh_pci_name(edev->pdev));
+ pe = event->pe;
+ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n",
+ pe->phb->global_number, pe->addr);
set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
- edev = handle_eeh_events(event);
-
- if (edev) {
- eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
- pci_dev_put(edev->pdev);
- }
+ eeh_handle_event(pe);
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
kfree(event);
mutex_unlock(&eeh_event_mutex);
/* If there are no new errors after an hour, clear the counter. */
- if (edev && edev->freeze_count>0) {
+ if (pe && pe->freeze_count > 0) {
msleep_interruptible(3600*1000);
- if (edev->freeze_count>0)
- edev->freeze_count--;
+ if (pe->freeze_count > 0)
+ pe->freeze_count--;
}
*/
static void eeh_thread_launcher(struct work_struct *dummy)
{
- if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
+ if (IS_ERR(kthread_run(eeh_event_handler, NULL, "eehd")))
printk(KERN_ERR "Failed to start EEH daemon\n");
}
/**
* eeh_send_failure_event - Generate a PCI error event
- * @edev: EEH device
+ * @pe: EEH PE
*
* This routine can be called within an interrupt context;
* the actual event will be delivered in a normal context
* (from a workqueue).
*/
-int eeh_send_failure_event(struct eeh_dev *edev)
+int eeh_send_failure_event(struct eeh_pe *pe)
{
unsigned long flags;
struct eeh_event *event;
- struct device_node *dn = eeh_dev_to_of_node(edev);
- const char *location;
-
- if (!mem_init_done) {
- printk(KERN_ERR "EEH: event during early boot not handled\n");
- location = of_get_property(dn, "ibm,loc-code", NULL);
- printk(KERN_ERR "EEH: device node = %s\n", dn->full_name);
- printk(KERN_ERR "EEH: PCI location = %s\n", location);
- return 1;
- }
- event = kmalloc(sizeof(*event), GFP_ATOMIC);
- if (event == NULL) {
- printk(KERN_ERR "EEH: out of memory, event not handled\n");
- return 1;
- }
-
- if (edev->pdev)
- pci_dev_get(edev->pdev);
- event->edev = edev;
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event) {
+ pr_err("EEH: out of memory, event not handled\n");
+ return -ENOMEM;
+ }
+ event->pe = pe;
/* We may or may not be called in an interrupt context */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
struct task_struct;
-#include "asm/ptrace.h"
-#include "registers.h"
-#include "sysdep/archsetjmp.h"
+#include <asm/ptrace.h>
+#include <registers.h>
+#include <sysdep/archsetjmp.h>
#include <linux/prefetch.h>
jmp_buf *fault_catcher;
struct task_struct *prev_sched;
unsigned long temp_stack;
- jmp_buf *exec_buf;
struct arch_thread arch;
jmp_buf switch_buf;
int mm_count;
.fault_addr = NULL, \
.prev_sched = NULL, \
.temp_stack = 0, \
- .exec_buf = NULL, \
.arch = INIT_ARCH_THREAD, \
.request = { 0 } \
}
#define __OS_H__
#include <stdarg.h>
-#include "irq_user.h"
-#include "longjmp.h"
-#include "mm_id.h"
+#include <irq_user.h>
+#include <longjmp.h>
+#include <mm_id.h>
#define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR))
extern int os_getpgrp(void);
extern void init_new_thread_signals(void);
- extern int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr);
extern int os_map_memory(void *virt, int fd, unsigned long long off,
unsigned long len, int r, int w, int x);
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
-#include "as-layout.h"
-#include "mem_user.h"
-#include "skas.h"
-#include "os.h"
+#include <as-layout.h>
+#include <mem_user.h>
+#include <skas.h>
+#include <os.h>
void flush_thread(void)
{
#endif
}
EXPORT_SYMBOL(start_thread);
-
- void __noreturn ret_from_kernel_execve(struct pt_regs *unused)
- {
- UML_LONGJMP(current->thread.exec_buf, 1);
- }
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
-#include "as-layout.h"
-#include "kern_util.h"
-#include "os.h"
-#include "skas.h"
+#include <as-layout.h>
+#include <kern_util.h>
+#include <os.h>
+#include <skas.h>
/*
* This is a per-cpu array. A processor only modifies its entry and it only
arg = current->thread.request.u.thread.arg;
/*
- * The return value is 1 if the kernel thread execs a process,
- * 0 if it just exits
+ * callback returns only if the kernel thread execs a process
*/
- n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
- if (n == 1)
- userspace(&current->thread.regs.regs);
- else
- do_exit(0);
+ n = fn(arg);
+ userspace(&current->thread.regs.regs);
}
/* Called magically, see new_thread_handler above */
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>
-#include "init.h"
-#include "longjmp.h"
-#include "os.h"
-#include "skas_ptrace.h"
+#include <init.h>
+#include <longjmp.h>
+#include <os.h>
+#include <skas_ptrace.h>
#define ARBITRARY_ADDR -1
#define FAILURE_PID -1
signal(SIGWINCH, SIG_IGN);
signal(SIGTERM, SIG_DFL);
}
-
- int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr)
- {
- jmp_buf buf;
- int n;
-
- *jmp_ptr = &buf;
- n = UML_SETJMP(&buf);
- if (n != 0)
- return n;
- (*fn)(arg);
- return 0;
- }
Say no to build a 32-bit kernel - formerly known as i386
config X86_32
- def_bool !64BIT
+ def_bool y
+ depends on !64BIT
select CLKSRC_I8253
+ select HAVE_UID16
config X86_64
- def_bool 64BIT
+ def_bool y
+ depends on 64BIT
select X86_DEV_DMA_OPS
### Arch settings
select HAVE_KRETPROBES
select HAVE_OPTPROBES
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FENTRY if X86_64
select HAVE_C_RECORDMCOUNT
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_FP_TEST
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_SYSCALL_TRACEPOINTS
+ select SYSCTL_EXCEPTION_TRACE
select HAVE_KVM
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_MIXED_BREAKPOINTS_REGS
select PERF_EVENTS
select HAVE_PERF_EVENTS_NMI
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_DEBUG_KMEMLEAK
select ANON_INODES
select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386
select HAVE_CMPXCHG_LOCAL if !M386
select IRQ_FORCED_THREADING
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_BPF_JIT if X86_64
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select CLKEVT_I8253
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
select KTIME_SCALAR if X86_32
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
+ select HAVE_RCU_USER_QS if X86_64
+ select HAVE_IRQ_TIME_ACCOUNTING
select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
config INSTRUCTION_DECODER
- def_bool (KPROBES || PERF_EVENTS || UPROBES)
+ def_bool y
+ depends on KPROBES || PERF_EVENTS || UPROBES
config OUTPUT_FORMAT
string
bool
config NEED_DMA_MAP_STATE
- def_bool (X86_64 || INTEL_IOMMU || DMA_API_DEBUG)
+ def_bool y
+ depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
config NEED_SG_DMA_LENGTH
def_bool y
config GENERIC_ISA_DMA
- def_bool ISA_DMA_API
+ def_bool y
+ depends on ISA_DMA_API
config GENERIC_BUG
def_bool y
bool
config ARCH_MAY_HAVE_PC_FDC
- def_bool ISA_DMA_API
+ def_bool y
+ depends on ISA_DMA_API
config RWSEM_GENERIC_SPINLOCK
- def_bool !X86_XADD
+ def_bool y
+ depends on !X86_XADD
config RWSEM_XCHGADD_ALGORITHM
- def_bool X86_XADD
+ def_bool y
+ depends on X86_XADD
config GENERIC_CALIBRATE_DELAY
def_bool y
source "arch/x86/xen/Kconfig"
-config KVM_CLOCK
- bool "KVM paravirtualized clock"
- select PARAVIRT
- select PARAVIRT_CLOCK
- ---help---
- Turning on this option will allow you to run a paravirtualized clock
- when running over the KVM hypervisor. Instead of relying on a PIT
- (or probably other) emulation by the underlying device model, the host
- provides the guest with timing infrastructure such as time of day, and
- system time
-
config KVM_GUEST
- bool "KVM Guest support"
+ bool "KVM Guest support (including kvmclock)"
select PARAVIRT
+ select PARAVIRT_CLOCK
+ default y if PARAVIRT_GUEST
---help---
This option enables various optimizations for running under the KVM
- hypervisor.
+ hypervisor. It includes a paravirtualized clock, so that instead
+ of relying on a PIT (or other timer) emulation by the
+ underlying device model, the host provides the guest with
+ timing infrastructure such as time of day and system time.
source "arch/x86/lguest/Kconfig"
def_bool y if X86_64
---help---
Support for software bounce buffers used on x86-64 systems
- which don't have a hardware IOMMU (e.g. the current generation
- of Intel's x86-64 CPUs). Using this PCI devices which can only
- access 32-bits of memory can be used on systems with more than
- 3 GB of memory. If unsure, say Y.
+ which don't have a hardware IOMMU. Using this, PCI devices
+ which can only access 32 bits of memory can be used on systems
+ with more than 3 GB of memory.
+ If unsure, say Y.
config IOMMU_HELPER
- def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
+ def_bool y
+ depends on CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
-config IRQ_TIME_ACCOUNTING
- bool "Fine granularity task level IRQ time accounting"
- default n
- ---help---
- Select this option to enable fine granularity task irq time
- accounting. This is done by reading a timestamp on each
- transitions between softirq and hardirq state, so there can be a
- small performance impact.
-
- If in doubt, say N here.
-
source "kernel/Kconfig.preempt"
config X86_UP_APIC
config X86_MCE
bool "Machine Check / overheating reporting"
+ default y
---help---
Machine Check support allows the processor to notify the
kernel if it detects a problem (e.g. overheating, data corruption).
Say N otherwise.
config MICROCODE
- tristate "/dev/cpu/microcode - microcode support"
+ tristate "CPU microcode loading support"
select FW_LOADER
---help---
+
If you say Y here, you will be able to update the microcode on
certain Intel and AMD processors. The Intel support is for the
- IA32 family, e.g. Pentium Pro, Pentium II, Pentium III,
- Pentium 4, Xeon etc. The AMD support is for family 0x10 and
- 0x11 processors, e.g. Opteron, Phenom and Turion 64 Ultra.
- You will obviously need the actual microcode binary data itself
- which is not shipped with the Linux kernel.
+ IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
+ Xeon etc. The AMD support is for families 0x10 and later. You will
+ obviously need the actual microcode binary data itself which is not
+ shipped with the Linux kernel.
This option selects the general module only, you need to select
at least one vendor specific module as well.
- To compile this driver as a module, choose M here: the
- module will be called microcode.
+ To compile this driver as a module, choose M here: the module
+ will be called microcode.
config MICROCODE_INTEL
- bool "Intel microcode patch loading support"
+ bool "Intel microcode loading support"
depends on MICROCODE
default MICROCODE
select FW_LOADER
<http://www.urbanmyth.org/microcode/>.
config MICROCODE_AMD
- bool "AMD microcode patch loading support"
+ bool "AMD microcode loading support"
depends on MICROCODE
select FW_LOADER
---help---
consumes more pagetable space per process.
config ARCH_PHYS_ADDR_T_64BIT
- def_bool X86_64 || X86_PAE
+ def_bool y
+ depends on X86_64 || X86_PAE
config ARCH_DMA_ADDR_T_64BIT
- def_bool X86_64 || HIGHMEM64G
+ def_bool y
+ depends on X86_64 || HIGHMEM64G
config DIRECT_GBPAGES
bool "Enable 1GB pages for kernel pagetables" if EXPERT
depends on ARCH_SPARSEMEM_ENABLE
config ARCH_MEMORY_PROBE
- def_bool X86_64
- depends on MEMORY_HOTPLUG
+ def_bool y
+ depends on X86_64 && MEMORY_HOTPLUG
config ARCH_PROC_KCORE_TEXT
def_bool y
If supported, this is a high bandwidth, cryptographically
secure hardware random number generator.
+config X86_SMAP
+ def_bool y
+ prompt "Supervisor Mode Access Prevention" if EXPERT
+ ---help---
+ Supervisor Mode Access Prevention (SMAP) is a security
+ feature in newer Intel processors. There is a small
+ performance cost if this is enabled and turned on; there is
+ also a small increase in the kernel size if this is enabled.
+
+ If unsure, say Y.
+
config EFI
bool "EFI runtime service support"
depends on ACPI
config PCI_CNB20LE_QUIRK
bool "Read CNB20LE Host Bridge Windows" if EXPERT
- default n
depends on PCI && EXPERIMENTAL
help
Read the PCI windows out of the CNB20LE host bridge. This allows
bool "IA32 Emulation"
depends on X86_64
select COMPAT_BINFMT_ELF
+ select HAVE_UID16
---help---
Include code to run legacy 32-bit programs under a
64-bit kernel. You should likely turn this on, unless you're
depends on IA32_EMULATION || X86_X32
select ARCH_WANT_OLD_COMPAT_IPC
+if COMPAT
config COMPAT_FOR_U64_ALIGNMENT
- def_bool COMPAT
- depends on X86_64
+ def_bool y
config SYSVIPC_COMPAT
def_bool y
- depends on COMPAT && SYSVIPC
+ depends on SYSVIPC
config KEYS_COMPAT
- bool
- depends on COMPAT && KEYS
- default y
+ def_bool y
+ depends on KEYS
+endif
endmenu
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
+#include <asm/smap.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
CFI_ENDPROC
END(ret_from_fork)
- ENTRY(ret_from_kernel_execve)
- movl %eax, %esp
- movl $0,PT_EAX(%esp)
+ ENTRY(ret_from_kernel_thread)
+ CFI_STARTPROC
+ pushl_cfi %eax
+ call schedule_tail
GET_THREAD_INFO(%ebp)
+ popl_cfi %eax
+ pushl_cfi $0x0202 # Reset kernel eflags
+ popfl_cfi
+ movl PT_EBP(%esp),%eax
+ call *PT_EBX(%esp)
+ movl $0,PT_EAX(%esp)
jmp syscall_exit
- END(ret_from_kernel_execve)
+ CFI_ENDPROC
+ ENDPROC(ret_from_kernel_thread)
/*
* Interrupt exit functions should be protected against kprobes
*/
cmpl $__PAGE_OFFSET-3,%ebp
jae syscall_fault
+ ASM_STAC
1: movl (%ebp),%ebp
+ ASM_CLAC
movl %ebp,PT_EBP(%esp)
_ASM_EXTABLE(1b,syscall_fault)
# system call handler stub
ENTRY(system_call)
RING0_INT_FRAME # can't unwind into user space anyway
+ ASM_CLAC
pushl_cfi %eax # save orig_eax
SAVE_ALL
GET_THREAD_INFO(%ebp)
RING0_INT_FRAME # can't unwind into user space anyway
syscall_fault:
+ ASM_CLAC
GET_THREAD_INFO(%ebp)
movl $-EFAULT,PT_EAX(%esp)
jmp resume_userspace
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
+ ASM_CLAC
addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */
SAVE_ALL
TRACE_IRQS_OFF
#define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \
RING0_INT_FRAME; \
+ ASM_CLAC; \
pushl_cfi $~(nr); \
SAVE_ALL; \
TRACE_IRQS_OFF \
ENTRY(coprocessor_error)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0
pushl_cfi $do_coprocessor_error
jmp error_code
ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
ENTRY(device_not_available)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $-1 # mark this as an int
pushl_cfi $do_device_not_available
jmp error_code
ENTRY(overflow)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0
pushl_cfi $do_overflow
jmp error_code
ENTRY(bounds)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0
pushl_cfi $do_bounds
jmp error_code
ENTRY(invalid_op)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0
pushl_cfi $do_invalid_op
jmp error_code
ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0
pushl_cfi $do_coprocessor_segment_overrun
jmp error_code
ENTRY(invalid_TSS)
RING0_EC_FRAME
+ ASM_CLAC
pushl_cfi $do_invalid_TSS
jmp error_code
CFI_ENDPROC
ENTRY(segment_not_present)
RING0_EC_FRAME
+ ASM_CLAC
pushl_cfi $do_segment_not_present
jmp error_code
CFI_ENDPROC
ENTRY(stack_segment)
RING0_EC_FRAME
+ ASM_CLAC
pushl_cfi $do_stack_segment
jmp error_code
CFI_ENDPROC
ENTRY(alignment_check)
RING0_EC_FRAME
+ ASM_CLAC
pushl_cfi $do_alignment_check
jmp error_code
CFI_ENDPROC
ENTRY(divide_error)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0 # no error code
pushl_cfi $do_divide_error
jmp error_code
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0
pushl_cfi machine_check_vector
jmp error_code
ENTRY(spurious_interrupt_bug)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $0
pushl_cfi $do_spurious_interrupt_bug
jmp error_code
*/
.popsection
- ENTRY(ret_from_kernel_thread)
- CFI_STARTPROC
- pushl_cfi %eax
- call schedule_tail
- GET_THREAD_INFO(%ebp)
- popl_cfi %eax
- pushl_cfi $0x0202 # Reset kernel eflags
- popfl_cfi
- movl PT_EBP(%esp),%eax
- call *PT_EBX(%esp)
- call do_exit
- ud2 # padding for call trace
- CFI_ENDPROC
- ENDPROC(ret_from_kernel_thread)
-
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
entrypoint expects, so fix it up before using the normal path. */
pushl %eax
pushl %ecx
pushl %edx
- movl 0xc(%esp), %eax
+ pushl $0 /* Pass NULL as regs pointer */
+ movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx
+ leal function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call
ftrace_call:
call ftrace_stub
+ addl $4,%esp /* skip NULL pointer */
popl %edx
popl %ecx
popl %eax
+ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
ret
END(ftrace_caller)
+ENTRY(ftrace_regs_caller)
+ pushf /* push flags before compare (in cs location) */
+ cmpl $0, function_trace_stop
+ jne ftrace_restore_flags
+
+ /*
+ * i386 does not save SS and ESP when coming from kernel.
+ * Instead, to get sp, &regs->sp is used (see ptrace.h).
+ * Unfortunately, that means eflags must be at the same location
+ * as the current return ip is. We move the return ip into the
+ * ip location, and move flags into the return ip location.
+ */
+ pushl 4(%esp) /* save return ip into ip slot */
+
+ pushl $0 /* Load 0 into orig_ax */
+ pushl %gs
+ pushl %fs
+ pushl %es
+ pushl %ds
+ pushl %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ pushl %ecx
+ pushl %ebx
+
+ movl 13*4(%esp), %eax /* Get the saved flags */
+ movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
+ /* clobbering return ip */
+ movl $__KERNEL_CS,13*4(%esp)
+
+ movl 12*4(%esp), %eax /* Load ip (1st parameter) */
+ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
+ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
+ leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
+ pushl %esp /* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+ call ftrace_stub
+
+ addl $4, %esp /* Skip pt_regs */
+ movl 14*4(%esp), %eax /* Move flags back into cs */
+ movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
+ movl 12*4(%esp), %eax /* Get return ip from regs->ip */
+ movl %eax, 14*4(%esp) /* Put return ip back for ret */
+
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
+ popl %ds
+ popl %es
+ popl %fs
+ popl %gs
+ addl $8, %esp /* Skip orig_ax and ip */
+ popf /* Pop flags at end (no addl to corrupt flags) */
+ jmp ftrace_ret
+
+ftrace_restore_flags:
+ popf
+ jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
- cmpl $0, function_trace_stop
- jne ftrace_stub
-
pushl %eax
pushl %ecx
pushl %edx
ENTRY(page_fault)
RING0_EC_FRAME
+ ASM_CLAC
pushl_cfi $do_page_fault
ALIGN
error_code:
ENTRY(debug)
RING0_INT_FRAME
+ ASM_CLAC
cmpl $ia32_sysenter_target,(%esp)
jne debug_stack_correct
FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
*/
ENTRY(nmi)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi %eax
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
ENTRY(int3)
RING0_INT_FRAME
+ ASM_CLAC
pushl_cfi $-1 # mark this as an int
SAVE_ALL
TRACE_IRQS_OFF
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
RING0_EC_FRAME
+ ASM_CLAC
pushl_cfi $do_async_page_fault
jmp error_code
CFI_ENDPROC
#include <asm/ftrace.h>
#include <asm/percpu.h>
#include <asm/asm.h>
+#include <asm/rcu.h>
+#include <asm/smap.h>
#include <linux/err.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
.section .entry.text, "ax"
#ifdef CONFIG_FUNCTION_TRACER
+
+#ifdef CC_USING_FENTRY
+# define function_hook __fentry__
+#else
+# define function_hook mcount
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
+
+ENTRY(function_hook)
retq
-END(mcount)
+END(function_hook)
+
+/* skip is set if stack has been adjusted */
+.macro ftrace_caller_setup skip=0
+ MCOUNT_SAVE_FRAME \skip
+
+ /* Load the ftrace_ops into the 3rd parameter */
+ leaq function_trace_op, %rdx
+
+ /* Load ip into the first parameter */
+ movq RIP(%rsp), %rdi
+ subq $MCOUNT_INSN_SIZE, %rdi
+ /* Load the parent_ip into the second parameter */
+#ifdef CC_USING_FENTRY
+ movq SS+16(%rsp), %rsi
+#else
+ movq 8(%rbp), %rsi
+#endif
+.endm
ENTRY(ftrace_caller)
+ /* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_stub
- MCOUNT_SAVE_FRAME
-
- movq 0x38(%rsp), %rdi
- movq 8(%rbp), %rsi
- subq $MCOUNT_INSN_SIZE, %rdi
+ ftrace_caller_setup
+ /* regs go into 4th parameter (but make it NULL) */
+ movq $0, %rcx
GLOBAL(ftrace_call)
call ftrace_stub
MCOUNT_RESTORE_FRAME
+ftrace_return:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
retq
END(ftrace_caller)
+ENTRY(ftrace_regs_caller)
+ /* Save the current flags before compare (in SS location)*/
+ pushfq
+
+ /* Check if tracing was disabled (quick check) */
+ cmpl $0, function_trace_stop
+ jne ftrace_restore_flags
+
+ /* skip=8 to skip flags saved in SS */
+ ftrace_caller_setup 8
+
+ /* Save the rest of pt_regs */
+ movq %r15, R15(%rsp)
+ movq %r14, R14(%rsp)
+ movq %r13, R13(%rsp)
+ movq %r12, R12(%rsp)
+ movq %r11, R11(%rsp)
+ movq %r10, R10(%rsp)
+ movq %rbp, RBP(%rsp)
+ movq %rbx, RBX(%rsp)
+ /* Copy saved flags */
+ movq SS(%rsp), %rcx
+ movq %rcx, EFLAGS(%rsp)
+ /* Kernel segments */
+ movq $__KERNEL_DS, %rcx
+ movq %rcx, SS(%rsp)
+ movq $__KERNEL_CS, %rcx
+ movq %rcx, CS(%rsp)
+ /* Stack - skipping return address */
+ leaq SS+16(%rsp), %rcx
+ movq %rcx, RSP(%rsp)
+
+ /* regs go into 4th parameter */
+ leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+ call ftrace_stub
+
+ /* Copy flags back to SS, to restore them */
+ movq EFLAGS(%rsp), %rax
+ movq %rax, SS(%rsp)
+
+ /* Handlers can change the RIP */
+ movq RIP(%rsp), %rax
+ movq %rax, SS+8(%rsp)
+
+ /* restore the rest of pt_regs */
+ movq R15(%rsp), %r15
+ movq R14(%rsp), %r14
+ movq R13(%rsp), %r13
+ movq R12(%rsp), %r12
+ movq R10(%rsp), %r10
+ movq RBP(%rsp), %rbp
+ movq RBX(%rsp), %rbx
+
+ /* skip=8 to skip flags saved in SS */
+ MCOUNT_RESTORE_FRAME 8
+
+ /* Restore flags */
+ popfq
+
+ jmp ftrace_return
+ftrace_restore_flags:
+ popfq
+ jmp ftrace_stub
+
+END(ftrace_regs_caller)
+
+
#else /* ! CONFIG_DYNAMIC_FTRACE */
-ENTRY(mcount)
+
+ENTRY(function_hook)
cmpl $0, function_trace_stop
jne ftrace_stub
trace:
MCOUNT_SAVE_FRAME
- movq 0x38(%rsp), %rdi
+ movq RIP(%rsp), %rdi
+#ifdef CC_USING_FENTRY
+ movq SS+16(%rsp), %rsi
+#else
movq 8(%rbp), %rsi
+#endif
subq $MCOUNT_INSN_SIZE, %rdi
call *ftrace_trace_function
MCOUNT_RESTORE_FRAME
jmp ftrace_stub
-END(mcount)
+END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
- cmpl $0, function_trace_stop
- jne ftrace_stub
-
MCOUNT_SAVE_FRAME
+#ifdef CC_USING_FENTRY
+ leaq SS+16(%rsp), %rdi
+ movq $0, %rdx /* No framepointers needed */
+#else
leaq 8(%rbp), %rdi
- movq 0x38(%rsp), %rsi
movq (%rbp), %rdx
+#endif
+ movq RIP(%rsp), %rsi
subq $MCOUNT_INSN_SIZE, %rsi
call prepare_ftrace_return
.macro SAVE_ARGS_IRQ
cld
/* start from rbp in pt_regs and jump over */
- movq_cfi rdi, RDI-RBP
- movq_cfi rsi, RSI-RBP
- movq_cfi rdx, RDX-RBP
- movq_cfi rcx, RCX-RBP
- movq_cfi rax, RAX-RBP
- movq_cfi r8, R8-RBP
- movq_cfi r9, R9-RBP
- movq_cfi r10, R10-RBP
- movq_cfi r11, R11-RBP
+ movq_cfi rdi, (RDI-RBP)
+ movq_cfi rsi, (RSI-RBP)
+ movq_cfi rdx, (RDX-RBP)
+ movq_cfi rcx, (RCX-RBP)
+ movq_cfi rax, (RAX-RBP)
+ movq_cfi r8, (R8-RBP)
+ movq_cfi r9, (R9-RBP)
+ movq_cfi r10, (R10-RBP)
+ movq_cfi r11, (R11-RBP)
/* Save rbp so that we can unwind from get_irq_regs() */
movq_cfi rbp, 0
.endm
ENTRY(save_rest)
- PARTIAL_FRAME 1 REST_SKIP+8
+ PARTIAL_FRAME 1 (REST_SKIP+8)
movq 5*8+16(%rsp), %r11 /* save return address */
movq_cfi rbx, RBX+16
movq_cfi rbp, RBP+16
LOCK ; btr $TIF_FORK,TI_flags(%r8)
- pushq_cfi kernel_eflags(%rip)
+ pushq_cfi $0x0002
popfq_cfi # reset kernel eflags
call schedule_tail # rdi: 'prev' task parameter
jmp ret_from_sys_call # go to the SYSRET fastpath
1:
- subq $REST_SKIP, %rsp # move the stack pointer back
+ subq $REST_SKIP, %rsp # leave space for volatiles
CFI_ADJUST_CFA_OFFSET REST_SKIP
movq %rbp, %rdi
call *%rbx
- # exit
- mov %eax, %edi
- call do_exit
- ud2 # padding for call trace
-
+ movl $0, RAX(%rsp)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
CFI_ENDPROC
END(ret_from_fork)
* System call entry. Up to 6 arguments in registers are supported.
*
* SYSCALL does not save anything on the stack and does not change the
- * stack pointer.
+ * stack pointer. However, it does mask the flags register for us, so
+ * CLD and CLAC are not needed.
*/
/*
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
- call schedule
+ SCHEDULE_USER
popq_cfi %rdi
jmp sysret_check
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
- call schedule
+ SCHEDULE_USER
popq_cfi %rdi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
+ ASM_CLAC
XCPT_FRAME
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
- call schedule
+ SCHEDULE_USER
popq_cfi %rdi
GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE)
*/
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
+ ASM_CLAC
INTR_FRAME
pushq_cfi $~(\num)
.Lcommon_\sym:
*/
.macro zeroentry sym do_sym
ENTRY(\sym)
+ ASM_CLAC
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
+ ASM_CLAC
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
+ ASM_CLAC
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
.macro errorentry sym do_sym
ENTRY(\sym)
+ ASM_CLAC
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
subq $ORIG_RAX-R15, %rsp
/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
+ ASM_CLAC
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
subq $ORIG_RAX-R15, %rsp
jmp 2b
.previous
- ENTRY(ret_from_kernel_execve)
- movq %rdi, %rsp
- movl $0, RAX(%rsp)
- // RESTORE_REST
- movq 0*8(%rsp), %r15
- movq 1*8(%rsp), %r14
- movq 2*8(%rsp), %r13
- movq 3*8(%rsp), %r12
- movq 4*8(%rsp), %rbp
- movq 5*8(%rsp), %rbx
- addq $(6*8), %rsp
- jmp int_ret_from_sys_call
- END(ret_from_kernel_execve)
-
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
CFI_STARTPROC
paranoid_schedule:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY)
- call schedule
+ SCHEDULE_USER
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
jmp paranoid_userspace
def_bool y
select GENERIC_FIND_FIRST_BIT
select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
config 64BIT
bool "64-bit kernel" if SUBARCH = "x86"
config X86_32
def_bool !64BIT
select HAVE_AOUT
+ select ARCH_WANT_IPC_PARSE_VERSION
config X86_64
def_bool 64BIT
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/file.h>
+ #include <linux/ptrace.h>
#include <asm/io.h>
#include <asm/bugs.h>
extern void fork_init(unsigned long);
extern void mca_init(void);
extern void sbus_init(void);
-extern void prio_tree_init(void);
extern void radix_tree_init(void);
#ifndef CONFIG_DEBUG_RODATA
static inline void mark_rodata_ro(void) { }
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();
- prio_tree_init();
init_timers();
hrtimers_init();
softirq_init();
acpi_early_init(); /* before LAPIC and SMP init */
sfi_init_late();
+ if (efi_enabled) {
+ efi_late_init();
+ efi_free_boot_services();
+ }
+
ftrace_init();
/* Do the rest non-__init'ed, we're now alive */
do_one_initcall(*fn);
}
- static void run_init_process(const char *init_filename)
+ static int run_init_process(const char *init_filename)
{
argv_init[0] = init_filename;
- kernel_execve(init_filename, argv_init, envp_init);
+ return kernel_execve(init_filename, argv_init, envp_init);
}
- /* This is a non __init function. Force it to be noinline otherwise gcc
- * makes it inline to init() and it becomes part of init.text section
- */
- static noinline int init_post(void)
+ static void __init kernel_init_freeable(void);
+
+ static int __ref kernel_init(void *unused)
{
+ kernel_init_freeable();
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
free_initmem();
flush_delayed_fput();
if (ramdisk_execute_command) {
- run_init_process(ramdisk_execute_command);
+ if (!run_init_process(ramdisk_execute_command))
+ return 0;
printk(KERN_WARNING "Failed to execute %s\n",
ramdisk_execute_command);
}
* trying to recover a really broken machine.
*/
if (execute_command) {
- run_init_process(execute_command);
+ if (!run_init_process(execute_command))
+ return 0;
printk(KERN_WARNING "Failed to execute %s. Attempting "
"defaults...\n", execute_command);
}
- run_init_process("/sbin/init");
- run_init_process("/etc/init");
- run_init_process("/bin/init");
- run_init_process("/bin/sh");
+ if (!run_init_process("/sbin/init") ||
+ !run_init_process("/etc/init") ||
+ !run_init_process("/bin/init") ||
+ !run_init_process("/bin/sh"))
+ return 0;
panic("No init found. Try passing init= option to kernel. "
"See Linux Documentation/init.txt for guidance.");
}
- static int __init kernel_init(void * unused)
+ static void __init kernel_init_freeable(void)
{
/*
* Wait until kthreadd is all set-up.
* we're essentially up and running. Get rid of the
* initmem segments and start the user-mode stuff..
*/
-
- init_post();
- return 0;
}
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
+ #include <linux/ptrace.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
};
struct kthread {
- int should_stop;
+ unsigned long flags;
+ unsigned int cpu;
void *data;
+ struct completion parked;
struct completion exited;
};
+enum KTHREAD_BITS {
+ KTHREAD_IS_PER_CPU = 0,
+ KTHREAD_SHOULD_STOP,
+ KTHREAD_SHOULD_PARK,
+ KTHREAD_IS_PARKED,
+};
+
#define to_kthread(tsk) \
container_of((tsk)->vfork_done, struct kthread, exited)
* and this will return true. You should then return, and your return
* value will be passed through to kthread_stop().
*/
-int kthread_should_stop(void)
+bool kthread_should_stop(void)
{
- return to_kthread(current)->should_stop;
+ return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
+/**
+ * kthread_should_park - should this kthread park now?
+ *
+ * When someone calls kthread_park() on your kthread, it will be woken
+ * and this will return true. You should then do the necessary
+ * cleanup and call kthread_parkme().
+ *
+ * Similar to kthread_should_stop(), but this keeps the thread alive
+ * and in a park position. kthread_unpark() "restarts" the thread and
+ * calls the thread function again.
+ */
+bool kthread_should_park(void)
+{
+ return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
+}
+
/**
* kthread_freezable_should_stop - should this freezable kthread return now?
* @was_frozen: optional out parameter, indicates whether %current was frozen
return to_kthread(task)->data;
}
+static void __kthread_parkme(struct kthread *self)
+{
+ __set_current_state(TASK_INTERRUPTIBLE);
+ while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
+ if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
+ complete(&self->parked);
+ schedule();
+ __set_current_state(TASK_INTERRUPTIBLE);
+ }
+ clear_bit(KTHREAD_IS_PARKED, &self->flags);
+ __set_current_state(TASK_RUNNING);
+}
+
+void kthread_parkme(void)
+{
+ __kthread_parkme(to_kthread(current));
+}
+
static int kthread(void *_create)
{
/* Copy data: it's on kthread's stack */
struct kthread self;
int ret;
- self.should_stop = 0;
+ self.flags = 0;
self.data = data;
init_completion(&self.exited);
+ init_completion(&self.parked);
current->vfork_done = &self.exited;
/* OK, tell user we're spawned, wait for stop or wakeup */
schedule();
ret = -EINTR;
- if (!self.should_stop)
- ret = threadfn(data);
+ if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
+ __kthread_parkme(&self);
+ ret = threadfn(data);
+ }
/* we can't just return, we must preserve "self" on stack */
do_exit(ret);
}
* Returns a task_struct or ERR_PTR(-ENOMEM).
*/
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
- void *data,
- int node,
+ void *data, int node,
const char namefmt[],
...)
{
}
EXPORT_SYMBOL(kthread_create_on_node);
+static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+ /* It's safe because the task is inactive. */
+ do_set_cpus_allowed(p, cpumask_of(cpu));
+ p->flags |= PF_THREAD_BOUND;
+}
+
/**
* kthread_bind - bind a just-created kthread to a cpu.
* @p: thread created by kthread_create().
WARN_ON(1);
return;
}
-
- /* It's safe because the task is inactive. */
- do_set_cpus_allowed(p, cpumask_of(cpu));
- p->flags |= PF_THREAD_BOUND;
+ __kthread_bind(p, cpu);
}
EXPORT_SYMBOL(kthread_bind);
+/**
+ * kthread_create_on_cpu - Create a cpu bound kthread
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu to which the thread should be bound.
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: This helper function creates and names a kernel thread.
+ * The thread will be woken and put into park mode.
+ */
+struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+ void *data, unsigned int cpu,
+ const char *namefmt)
+{
+ struct task_struct *p;
+
+ p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
+ cpu);
+ if (IS_ERR(p))
+ return p;
+ set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+ to_kthread(p)->cpu = cpu;
+ /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
+ kthread_park(p);
+ return p;
+}
+
+static struct kthread *task_get_live_kthread(struct task_struct *k)
+{
+ struct kthread *kthread;
+
+ get_task_struct(k);
+ kthread = to_kthread(k);
+ /* It might have exited */
+ barrier();
+ if (k->vfork_done != NULL)
+ return kthread;
+ return NULL;
+}
+
+/**
+ * kthread_unpark - unpark a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+ *
+ * Sets kthread_should_park() for @k to return false and wakes it,
+ * letting it leave the parked state. If the thread is marked percpu
+ * then it is bound to the cpu again.
+ */
+void kthread_unpark(struct task_struct *k)
+{
+ struct kthread *kthread = task_get_live_kthread(k);
+
+ if (kthread) {
+ clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+ /*
+ * We clear the IS_PARKED bit here as we don't wait
+ * until the task has left the park code. So if we'd
+ * park before that happens we'd see the IS_PARKED bit
+ * which might be about to be cleared.
+ */
+ if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+ if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+ __kthread_bind(k, kthread->cpu);
+ wake_up_process(k);
+ }
+ }
+ put_task_struct(k);
+}
+
+/**
+ * kthread_park - park a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+ *
+ * Sets kthread_should_park() for @k to return true, wakes it, and
+ * waits for it to return. This can also be called after kthread_create()
+ * instead of calling wake_up_process(): the thread will park without
+ * calling threadfn().
+ *
+ * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
+ * If called by the kthread itself just the park bit is set.
+ */
+int kthread_park(struct task_struct *k)
+{
+ struct kthread *kthread = task_get_live_kthread(k);
+ int ret = -ENOSYS;
+
+ if (kthread) {
+ if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+ set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+ if (k != current) {
+ wake_up_process(k);
+ wait_for_completion(&kthread->parked);
+ }
+ }
+ ret = 0;
+ }
+ put_task_struct(k);
+ return ret;
+}
+
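A hedged usage sketch of the new park API (not part of this patch; my_percpu_thread() and start_worker() are illustrative names): a per-cpu worker honours both stop and park requests, and is created through kthread_create_on_cpu(), which hands the thread back in the parked state:

#include <linux/err.h>
#include <linux/kthread.h>

static int my_percpu_thread(void *unused)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps here until kthread_unpark() */
		/* ... do one unit of per-cpu work, then sleep or wait ... */
	}
	return 0;
}

static struct task_struct *start_worker(unsigned int cpu)
{
	struct task_struct *t;

	t = kthread_create_on_cpu(my_percpu_thread, NULL, cpu, "worker/%u");
	if (!IS_ERR(t))
		kthread_unpark(t);	/* thread was created parked and bound to @cpu */
	return t;
}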
/**
* kthread_stop - stop a thread created by kthread_create().
* @k: thread created by kthread_create().
*/
int kthread_stop(struct task_struct *k)
{
- struct kthread *kthread;
+ struct kthread *kthread = task_get_live_kthread(k);
int ret;
trace_sched_kthread_stop(k);
- get_task_struct(k);
-
- kthread = to_kthread(k);
- barrier(); /* it might have exited */
- if (k->vfork_done != NULL) {
- kthread->should_stop = 1;
+ if (kthread) {
+ set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
+ clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
wake_up_process(k);
wait_for_completion(&kthread->exited);
}