1 #ifndef _SCHED_SYSCTL_H
2 #define _SCHED_SYSCTL_H
/* Hung-task detector: sysctl knobs and their proc handler. */
#ifdef CONFIG_DETECT_HUNG_TASK
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern int sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per a vma. In ELF, the number of sections is represented in unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to an image of program at
 * generating coredump, we need some margin. The number of extra sections is
 * 1-3 now and depends on arch. We use "5" as safe margin, here.
 *
 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
 * not a hard limit any more. Although some userspace tools can be surprised by
 * that.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;
/* CFS/EAS scheduler tunables (sysctl-exposed, per the sysctl_ prefix). */
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_is_big_little;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_initial_task_util;
extern unsigned int sysctl_sched_cstate_aware;
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_walt_init_task_load_pct;
extern unsigned int sysctl_sched_walt_cpu_high_irqload;
#endif
/*
 * Scaling modes for the scheduler tunables; SCHED_TUNABLESCALING_END is the
 * range sentinel, not a valid mode.
 * NOTE(review): exact scaling semantics live in the scheduler proper — confirm
 * against kernel/sched/ before relying on them.
 */
enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
/* NUMA-balancing scan tunables. */
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
/* Debug-only tunables and their shared proc handler. */
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_sched_shares_window;

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);
#endif
/*
 *  control realtime throttling:
 *
 *  /proc/sys/kernel/sched_rt_period_us
 *  /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif
#ifdef CONFIG_SCHED_TUNE
extern unsigned int sysctl_sched_cfs_boost;
int sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
				   void __user *buffer, size_t *length,
				   loff_t *ppos);
/* Return the current global CFS boost value. */
static inline unsigned int get_sysctl_sched_cfs_boost(void)
{
	return sysctl_sched_cfs_boost;
}
#else /* CONFIG_SCHED_TUNE */
/* SCHED_TUNE disabled: boost is always zero, no ifdefs needed at call sites. */
static inline unsigned int get_sysctl_sched_cfs_boost(void)
{
	return 0;
}
#endif /* CONFIG_SCHED_TUNE */
#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
#endif
110 extern int sched_rr_timeslice;
112 extern int sched_rr_handler(struct ctl_table *table, int write,
113 void __user *buffer, size_t *lenp,
116 extern int sched_rt_handler(struct ctl_table *table, int write,
117 void __user *buffer, size_t *lenp,
120 extern int sysctl_numa_balancing(struct ctl_table *table, int write,
121 void __user *buffer, size_t *lenp,
124 #endif /* _SCHED_SYSCTL_H */