sched/preempt: Rearrange a few symbols after headers merge
author Frederic Weisbecker <fweisbec@gmail.com>
Tue, 12 May 2015 14:41:47 +0000 (16:41 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 19 May 2015 06:39:12 +0000 (08:39 +0200)
Adjust a few comments and further integrate a few definitions after the
dumb headers copy of <linux/preempt_mask.h> into <linux/preempt.h>.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/preempt.h

index 8cc0338a5e9a5252af1ab48e4d5d6df34943f0cf..37974cd4f0923e17a085425b1ef2fe56b137dca2 100644
@@ -9,14 +9,6 @@
 #include <linux/linkage.h>
 #include <linux/list.h>
 
-/*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
- */
-#define PREEMPT_NEED_RESCHED   0x80000000
-
-#include <asm/preempt.h>
-
 /*
  * We put the hardirq and softirq counter into the preemption
  * counter. The bitmask has the following meaning:
  * there are a few palaeontologic drivers which reenable interrupts in
  * the handler, so we need more than one bit here.
  *
- * PREEMPT_MASK:       0x000000ff
- * SOFTIRQ_MASK:       0x0000ff00
- * HARDIRQ_MASK:       0x000f0000
- *     NMI_MASK:       0x00100000
- * PREEMPT_ACTIVE:     0x00200000
+ *         PREEMPT_MASK:       0x000000ff
+ *         SOFTIRQ_MASK:       0x0000ff00
+ *         HARDIRQ_MASK:       0x000f0000
+ *             NMI_MASK:       0x00100000
+ *       PREEMPT_ACTIVE:       0x00200000
+ * PREEMPT_NEED_RESCHED:       0x80000000
  */
 #define PREEMPT_BITS   8
 #define SOFTIRQ_BITS   8
 #define PREEMPT_ACTIVE_SHIFT   (NMI_SHIFT + NMI_BITS)
 #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
 
+/* We use the MSB mostly because it's available */
+#define PREEMPT_NEED_RESCHED   0x80000000
+
+/* preempt_count() and related functions, which depend on PREEMPT_NEED_RESCHED */
+#include <asm/preempt.h>
+
 #define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
 #define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
 #define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
                                 | NMI_MASK))
 #define in_atomic_preempt_off() \
                ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
 
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible() 0
-#endif
-
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
@@ -160,6 +153,8 @@ do { \
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#define preemptible()  (preempt_count() == 0 && !irqs_disabled())
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -232,6 +227,7 @@ do { \
 #define preempt_disable_notrace()              barrier()
 #define preempt_enable_no_resched_notrace()    barrier()
 #define preempt_enable_notrace()               barrier()
+#define preemptible()                          0
 
 #endif /* CONFIG_PREEMPT_COUNT */
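
For reference, the bit layout documented in the header comment follows directly
from the *_BITS/*_SHIFT arithmetic in <linux/preempt.h>. The standalone C sketch
below is userspace code, not the kernel header: the main() harness is purely
illustrative, and the HARDIRQ_BITS/NMI_BITS/PREEMPT_ACTIVE_BITS widths are
assumptions inferred from the masks shown in the diff. It rederives each mask
and checks it against the table above:

    #include <assert.h>
    #include <stdio.h>

    /* Field widths; PREEMPT_BITS and SOFTIRQ_BITS appear in the diff, the
     * remaining widths are inferred from the documented mask values. */
    #define PREEMPT_BITS            8
    #define SOFTIRQ_BITS            8
    #define HARDIRQ_BITS            4
    #define NMI_BITS                1
    #define PREEMPT_ACTIVE_BITS     1

    /* Each field starts where the previous one ends. */
    #define PREEMPT_SHIFT           0
    #define SOFTIRQ_SHIFT           (PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT           (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
    #define NMI_SHIFT               (HARDIRQ_SHIFT + HARDIRQ_BITS)
    #define PREEMPT_ACTIVE_SHIFT    (NMI_SHIFT + NMI_BITS)

    #define __IRQ_MASK(x)           ((1UL << (x)) - 1)

    #define PREEMPT_MASK    (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
    #define SOFTIRQ_MASK    (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
    #define HARDIRQ_MASK    (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
    #define NMI_MASK        (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
    #define PREEMPT_ACTIVE  (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)

    /* The MSB; after this patch it is defined before <asm/preempt.h> is
     * included, so the arch implementation of preempt_count() can rely
     * on it. */
    #define PREEMPT_NEED_RESCHED    0x80000000

    int main(void)
    {
            /* These must match the table in the header comment. */
            assert(PREEMPT_MASK         == 0x000000ff);
            assert(SOFTIRQ_MASK         == 0x0000ff00);
            assert(HARDIRQ_MASK         == 0x000f0000);
            assert(NMI_MASK             == 0x00100000);
            assert(PREEMPT_ACTIVE       == 0x00200000);
            assert(PREEMPT_NEED_RESCHED == 0x80000000);

            printf("preempt_count() bit layout verified\n");
            return 0;
    }

Note the ordering the patch establishes: PREEMPT_NEED_RESCHED is now defined
before <asm/preempt.h> is pulled in, since the arch header's preempt_count()
implementation depends on it (x86, for instance, folds the flag into its
per-CPU preempt count). Likewise, preemptible() moves into the two
CONFIG_PREEMPT_COUNT-conditional blocks, evaluating to
(preempt_count() == 0 && !irqs_disabled()) when preempt counting is enabled
and to a constant 0 otherwise.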