/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color, used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_END		= NR_CPUS + 1,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off
	 * (WORK_STRUCT_COLOR_SHIFT of 4 plus WORK_STRUCT_COLOR_BITS of 4).
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/*
 * A struct for workqueue attributes.  This can be used to change
 * attributes of an unbound workqueue.
 *
 * Unlike other fields, ->no_numa isn't a property of a worker_pool.  It
 * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
 * participate in pool hash calculations or equality comparisons.
 */
struct workqueue_attrs {
	int			nice;		/* nice level */
	cpumask_var_t		cpumask;	/* allowed CPUs */
	bool			no_numa;	/* disable NUMA affinity */
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

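/*
 * Illustrative sketch (struct my_dev, poll_dwork and my_poll() are
 * hypothetical names, not part of this header): a delayed work handler
 * receives the embedded work_struct and can recover its container with
 * to_delayed_work():
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev,
 *						  poll_dwork);
 *
 *		my_poll(dev);
 *	}
 */
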
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

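/*
 * Illustrative sketch (my_work and my_work_fn are hypothetical names):
 * declaring a statically initialized work item and its handler at file
 * scope:
 *
 *	static void my_work_fn(struct work_struct *unused)
 *	{
 *		pr_info("my_work executed\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 * The item can later be submitted with schedule_work(&my_work) or
 * queue_work().
 */
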
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)					\
	do {								\
		(_work)->func = (_func);				\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)				\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	do {								\
		__INIT_WORK((_work), (_func), 0);			\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	do {								\
		__INIT_WORK((_work), (_func), 1);			\
	} while (0)

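/*
 * Illustrative sketch (struct my_dev and my_irq_work_fn are hypothetical):
 * initializing an embedded work item once at setup time; it can then be
 * queued from atomic context such as an interrupt handler:
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *	};
 *
 *	INIT_WORK(&dev->irq_work, my_irq_work_fn);
 *	...
 *	schedule_work(&dev->irq_work);
 */
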
#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

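/*
 * Illustrative sketch (hypothetical names): a self-rearming poller built
 * on a delayed work item:
 *
 *	INIT_DELAYED_WORK(&dev->poll_dwork, my_poll_fn);
 *	queue_delayed_work(system_wq, &dev->poll_dwork, HZ);
 *
 * Inside my_poll_fn(), requeue with
 * queue_delayed_work(system_wq, &dev->poll_dwork, HZ) to poll roughly
 * once per second until cancelled.
 */
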
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)						\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w)						\
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work)					\
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified to
	 * contribute significantly to power consumption are marked with
	 * this flag; enabling the power_efficient mode leads to noticeable
	 * power saving at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
	return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
	return system_freezable_wq;
}

/* equivalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq			__system_nrt_wq()
#define system_nrt_freezable_wq		__system_nrt_freezable_wq()

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	if (__builtin_constant_p(fmt))					\
		__lock_name = (fmt);					\
	else								\
		__lock_name = #fmt;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)				\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)

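/*
 * Illustrative sketch ("mydrv" and the surrounding driver code are
 * hypothetical): allocating a dedicated workqueue at probe time and
 * tearing it down on removal.  A @max_active of 0 selects WQ_DFL_ACTIVE:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */
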
extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);

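/*
 * Illustrative sketch: adjusting the attributes of an unbound workqueue.
 * The values are arbitrary; apply_workqueue_attrs() only affects
 * WQ_UNBOUND workqueues:
 *
 *	struct workqueue_attrs *attrs;
 *	int ret;
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -5;
 *	cpumask_copy(attrs->cpumask, cpumask_of(0));
 *	ret = apply_workqueue_attrs(wq, attrs);
 *	free_workqueue_attrs(attrs);
 */
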
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

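/*
 * Illustrative sketch (hypothetical device structure): typical teardown
 * ordering - prevent new submissions first, then cancel synchronously.
 * The _sync variants may sleep and must not be called from the work item
 * they are cancelling:
 *
 *	cancel_delayed_work_sync(&dev->poll_dwork);
 *	cancel_work_sync(&dev->irq_work);
 */
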
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);

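/*
 * Illustrative sketch: work_busy() returns a bitmask of WORK_BUSY_PENDING
 * and WORK_BUSY_RUNNING.  The result is inherently racy, so it is suited
 * to diagnostics rather than synchronization:
 *
 *	unsigned int busy = work_busy(&dev->irq_work);
 *
 *	if (busy & WORK_BUSY_RUNNING)
 *		pr_debug("handler still running\n");
 */
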
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to mod_delayed_work_on() on the local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

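/*
 * Illustrative sketch (hypothetical names): mod_delayed_work() is a
 * convenient debounce primitive - every new event pushes the deadline
 * back instead of queueing a second instance:
 *
 *	static void my_event(struct my_dev *dev)
 *	{
 *		mod_delayed_work(system_wq, &dev->flush_dwork,
 *				 msecs_to_jiffies(100));
 *	}
 */
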
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

/**
 * keventd_up - is workqueue initialized yet?
 */
static inline bool keventd_up(void)
{
	return system_wq != NULL;
}

/*
 * Like cancel_delayed_work(), but uses del_timer() instead of
 * del_timer_sync().  This means, if it returns %false, the timer function
 * may still be running and the queueing is in progress.
 */
static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
	return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
	return flush_delayed_work(dwork);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

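/*
 * Illustrative sketch (my_read_reg and req are hypothetical): run a
 * function synchronously on a specific CPU and collect its return value:
 *
 *	static long my_read_reg(void *arg)
 *	{
 *		return read_some_percpu_state(arg);
 *	}
 *
 *	ret = work_on_cpu(2, my_read_reg, &req);
 */
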
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#endif	/* _LINUX_WORKQUEUE_H */