Merge branches 'fixes.2015.07.22a' and 'initexp.2015.08.04a' into HEAD
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Tue, 4 Aug 2015 15:40:58 +0000 (08:40 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Tue, 4 Aug 2015 15:40:58 +0000 (08:40 -0700)
fixes.2015.07.22a: Miscellaneous fixes.
initexp.2015.08.04a: Initialization and expedited updates.
(Single branch due to conflicts.)

26 files changed:
Documentation/RCU/whatisRCU.txt
MAINTAINERS
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/traps.c
drivers/base/power/opp.c
include/linux/fdtable.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/types.h
init/Kconfig
kernel/cgroup.c
kernel/cpu.c
kernel/pid.c
kernel/rcu/rcutorture.c
kernel/rcu/srcu.c
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/sched/core.c
kernel/time/Kconfig
kernel/workqueue.c
lib/Kconfig.debug
scripts/checkpatch.pl
security/device_cgroup.c

index 5746b0c77f3e4c53da9d68b1213bae991f062092..adc2184009c5ecec9ba4615291ca8998743b48e2 100644 (file)
@@ -883,7 +883,7 @@ All:  lockdep-checked RCU-protected pointer access
 
        rcu_access_pointer
        rcu_dereference_raw
-       rcu_lockdep_assert
+       RCU_LOCKDEP_WARN
        rcu_sleep_check
        RCU_NONIDLE
 
index 8133cefb6b6e28715197a86aad555c29edbb7aa1..f92d8136a4a4d5330d127562cb610b723bd4bd06 100644 (file)
@@ -8445,7 +8445,7 @@ M:        "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 M:     Josh Triplett <josh@joshtriplett.org>
 R:     Steven Rostedt <rostedt@goodmis.org>
 R:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-R:     Lai Jiangshan <laijs@cn.fujitsu.com>
+R:     Lai Jiangshan <jiangshanlai@gmail.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
@@ -8472,7 +8472,7 @@ M:        "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 M:     Josh Triplett <josh@joshtriplett.org>
 R:     Steven Rostedt <rostedt@goodmis.org>
 R:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-R:     Lai Jiangshan <laijs@cn.fujitsu.com>
+R:     Lai Jiangshan <jiangshanlai@gmail.com>
 L:     linux-kernel@vger.kernel.org
 W:     http://www.rdrop.com/users/paulmck/RCU/
 S:     Supported
@@ -9340,7 +9340,7 @@ F:        include/linux/sl?b*.h
 F:     mm/sl?b*
 
 SLEEPABLE READ-COPY UPDATE (SRCU)
-M:     Lai Jiangshan <laijs@cn.fujitsu.com>
+M:     Lai Jiangshan <jiangshanlai@gmail.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 M:     Josh Triplett <josh@joshtriplett.org>
 R:     Steven Rostedt <rostedt@goodmis.org>
index df919ff103c3ae845e727388765ebc1842df0cbc..3d6b5269fb2ea77c5b57dd99d27d60ad231891ce 100644 (file)
@@ -54,9 +54,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
 
 #define rcu_dereference_check_mce(p) \
 ({ \
-       rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-                          lockdep_is_held(&mce_chrdev_read_mutex), \
-                          "suspicious rcu_dereference_check_mce() usage"); \
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+                        !lockdep_is_held(&mce_chrdev_read_mutex), \
+                        "suspicious rcu_dereference_check_mce() usage"); \
        smp_load_acquire(&(p)); \
 })
 
index f5791927aa644493354dd487a35a1111fdf676d6..c5a5231d1d116143ea39c8334af0e9963f2d6529 100644 (file)
@@ -136,7 +136,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
        preempt_count_add(HARDIRQ_OFFSET);
 
        /* This code is a bit fragile.  Test it. */
-       rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
+       RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
 
        return prev_state;
 }
index 677fb28435532b4274eda86696d986a12da800a1..3b188f20b43fa4ace687cdc48ec947d94e947088 100644 (file)
@@ -110,8 +110,8 @@ static DEFINE_MUTEX(dev_opp_list_lock);
 
 #define opp_rcu_lockdep_assert()                                       \
 do {                                                                   \
-       rcu_lockdep_assert(rcu_read_lock_held() ||                      \
-                               lockdep_is_held(&dev_opp_list_lock),    \
+       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
+                               !lockdep_is_held(&dev_opp_list_lock),   \
                           "Missing rcu_read_lock() or "                \
                           "dev_opp_list_lock protection");             \
 } while (0)
index fbb88740634af011af6cc1be098f0a6d20aba07f..674e3e226465a0b21a283e068d77d665eca84052 100644 (file)
@@ -86,8 +86,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
 
 static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
 {
-       rcu_lockdep_assert(rcu_read_lock_held() ||
-                          lockdep_is_held(&files->file_lock),
+       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+                          !lockdep_is_held(&files->file_lock),
                           "suspicious rcu_dereference_check() usage");
        return __fcheck_files(files, fd);
 }
index 4cf5f51b4c9c43c2900d8fd5fad2ea93f873b33b..ff476515f7163ab1b0247cfd64c0a4f6a364b2f3 100644 (file)
@@ -226,6 +226,37 @@ struct rcu_synchronize {
 };
 void wakeme_after_rcu(struct rcu_head *head);
 
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+                  struct rcu_synchronize *rs_array);
+
+#define _wait_rcu_gp(checktiny, ...) \
+do { \
+       call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+       const int __n = ARRAY_SIZE(__crcu_array); \
+       struct rcu_synchronize __rs_array[__n]; \
+       \
+       __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
+} while (0)
+
+#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+
+/**
+ * synchronize_rcu_mult - Wait concurrently for multiple grace periods
+ * @...: List of call_rcu() functions for the flavors to wait on.
+ *
+ * This macro waits concurrently for multiple flavors of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
+ * on concurrent RCU and RCU-bh grace periods.  Waiting on a given SRCU
+ * domain requires you to write a wrapper function for that SRCU domain's
+ * call_srcu() function, supplying the corresponding srcu_struct.
+ *
+ * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
+ * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
+ * is automatically a grace period.
+ */
+#define synchronize_rcu_mult(...) \
+       _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+
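
As a sketch of the wrapper approach described in the comment above (the
srcu_struct "my_srcu" and its wrapper are hypothetical, not part of this
patch), waiting on an SRCU domain alongside other flavors might look like:

    /* Hypothetical SRCU domain plus a call_rcu_func_t-compatible wrapper. */
    DEFINE_STATIC_SRCU(my_srcu);

    static void call_my_srcu(struct rcu_head *head, rcu_callback_t func)
    {
            call_srcu(&my_srcu, head, func);
    }

    /* Wait concurrently for RCU, RCU-sched, and my_srcu grace periods. */
    synchronize_rcu_mult(call_rcu, call_rcu_sched, call_my_srcu);
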
 /**
  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
  * @head: structure to be used for queueing the RCU updates.
@@ -309,7 +340,7 @@ static inline void rcu_sysrq_end(void)
 }
 #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
 void rcu_user_enter(void);
 void rcu_user_exit(void);
 #else
@@ -317,7 +348,7 @@ static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
 static inline void rcu_user_hooks_switch(struct task_struct *prev,
                                         struct task_struct *next) { }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
@@ -392,10 +423,6 @@ bool __rcu_is_watching(void);
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
 
-typedef void call_rcu_func_t(struct rcu_head *head,
-                            void (*func)(struct rcu_head *head));
-void wait_rcu_gp(call_rcu_func_t crf);
-
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
 #include <linux/rcutree.h>
 #elif defined(CONFIG_TINY_RCU)
@@ -469,46 +496,10 @@ int rcu_read_lock_bh_held(void);
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
  * RCU-sched read-side critical section.  In absence of
  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise.  Note that disabling
- * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section.  This is useful for debug checks in functions
- * that required that they be called within an RCU-sched read-side
- * critical section.
- *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that if the CPU is in the idle loop from an RCU point of
- * view (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
- * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
- * that are in such a section, considering these as in extended quiescent
- * state, so such a CPU is effectively never in an RCU read-side critical
- * section regardless of what RCU primitives it invokes.  This state of
- * affairs is required --- we need to keep an RCU-free window in idle
- * where the CPU may possibly enter into low power mode. This way we can
- * notice an extended quiescent state to other CPUs that started a grace
- * period. Otherwise we would delay any grace period as long as we run in
- * the idle task.
- *
- * Similarly, we avoid claiming an SRCU read lock held if the current
- * CPU is offline.
+ * critical section unless it can prove otherwise.
  */
 #ifdef CONFIG_PREEMPT_COUNT
-static inline int rcu_read_lock_sched_held(void)
-{
-       int lockdep_opinion = 0;
-
-       if (!debug_lockdep_rcu_enabled())
-               return 1;
-       if (!rcu_is_watching())
-               return 0;
-       if (!rcu_lockdep_current_cpu_online())
-               return 0;
-       if (debug_locks)
-               lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-       return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
-}
+int rcu_read_lock_sched_held(void);
 #else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
@@ -545,6 +536,11 @@ static inline int rcu_read_lock_sched_held(void)
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+/* Deprecate rcu_lockdep_assert():  Use RCU_LOCKDEP_WARN() instead. */
+static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
+{
+}
+
 #ifdef CONFIG_PROVE_RCU
 
 /**
@@ -555,17 +551,32 @@ static inline int rcu_read_lock_sched_held(void)
 #define rcu_lockdep_assert(c, s)                                       \
        do {                                                            \
                static bool __section(.data.unlikely) __warned;         \
+               deprecate_rcu_lockdep_assert();                         \
                if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
                        __warned = true;                                \
                        lockdep_rcu_suspicious(__FILE__, __LINE__, s);  \
                }                                                       \
        } while (0)
 
+/**
+ * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
+ * @c: condition to check
+ * @s: informative message
+ */
+#define RCU_LOCKDEP_WARN(c, s)                                         \
+       do {                                                            \
+               static bool __section(.data.unlikely) __warned;         \
+               if (debug_lockdep_rcu_enabled() && !__warned && (c)) {  \
+                       __warned = true;                                \
+                       lockdep_rcu_suspicious(__FILE__, __LINE__, s);  \
+               }                                                       \
+       } while (0)
+
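
Note that RCU_LOCKDEP_WARN() inverts the sense of the condition relative to
rcu_lockdep_assert(): the old macro complained when its condition was false,
whereas the new one complains when its condition is true.  A sketch of a
typical conversion (the lock "my_lock" is hypothetical), applying De Morgan's
law to the or-of-holds:

    /* Before: assert that rcu_read_lock() or my_lock is held. */
    rcu_lockdep_assert(rcu_read_lock_held() ||
                       lockdep_is_held(&my_lock),
                       "need rcu_read_lock() or my_lock");

    /* After: warn if neither rcu_read_lock() nor my_lock is held. */
    RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
                     !lockdep_is_held(&my_lock),
                     "need rcu_read_lock() or my_lock");
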
 #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
 static inline void rcu_preempt_sleep_check(void)
 {
-       rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
-                          "Illegal context switch in RCU read-side critical section");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+                        "Illegal context switch in RCU read-side critical section");
 }
 #else /* #ifdef CONFIG_PROVE_RCU */
 static inline void rcu_preempt_sleep_check(void)
@@ -576,15 +587,16 @@ static inline void rcu_preempt_sleep_check(void)
 #define rcu_sleep_check()                                              \
        do {                                                            \
                rcu_preempt_sleep_check();                              \
-               rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),     \
-                                  "Illegal context switch in RCU-bh read-side critical section"); \
-               rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),  \
-                                  "Illegal context switch in RCU-sched read-side critical section"); \
+               RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),        \
+                                "Illegal context switch in RCU-bh read-side critical section"); \
+               RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),     \
+                                "Illegal context switch in RCU-sched read-side critical section"); \
        } while (0)
 
 #else /* #ifdef CONFIG_PROVE_RCU */
 
-#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
 #define rcu_sleep_check() do { } while (0)
 
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -615,13 +627,13 @@ static inline void rcu_preempt_sleep_check(void)
 ({ \
        /* Dependency order vs. p above. */ \
        typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
-       rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+       RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
        rcu_dereference_sparse(p, space); \
        ((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
-       rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+       RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
        rcu_dereference_sparse(p, space); \
        ((typeof(*p) __force __kernel *)(p)); \
 })
@@ -845,8 +857,8 @@ static inline void rcu_read_lock(void)
        __rcu_read_lock();
        __acquire(RCU);
        rcu_lock_acquire(&rcu_lock_map);
-       rcu_lockdep_assert(rcu_is_watching(),
-                          "rcu_read_lock() used illegally while idle");
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+                        "rcu_read_lock() used illegally while idle");
 }
 
 /*
@@ -896,8 +908,8 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
-       rcu_lockdep_assert(rcu_is_watching(),
-                          "rcu_read_unlock() used illegally while idle");
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+                        "rcu_read_unlock() used illegally while idle");
        __release(RCU);
        __rcu_read_unlock();
        rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -925,8 +937,8 @@ static inline void rcu_read_lock_bh(void)
        local_bh_disable();
        __acquire(RCU_BH);
        rcu_lock_acquire(&rcu_bh_lock_map);
-       rcu_lockdep_assert(rcu_is_watching(),
-                          "rcu_read_lock_bh() used illegally while idle");
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+                        "rcu_read_lock_bh() used illegally while idle");
 }
 
 /*
@@ -936,8 +948,8 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-       rcu_lockdep_assert(rcu_is_watching(),
-                          "rcu_read_unlock_bh() used illegally while idle");
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+                        "rcu_read_unlock_bh() used illegally while idle");
        rcu_lock_release(&rcu_bh_lock_map);
        __release(RCU_BH);
        local_bh_enable();
@@ -961,8 +973,8 @@ static inline void rcu_read_lock_sched(void)
        preempt_disable();
        __acquire(RCU_SCHED);
        rcu_lock_acquire(&rcu_sched_lock_map);
-       rcu_lockdep_assert(rcu_is_watching(),
-                          "rcu_read_lock_sched() used illegally while idle");
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+                        "rcu_read_lock_sched() used illegally while idle");
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -979,8 +991,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-       rcu_lockdep_assert(rcu_is_watching(),
-                          "rcu_read_unlock_sched() used illegally while idle");
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+                        "rcu_read_unlock_sched() used illegally while idle");
        rcu_lock_release(&rcu_sched_lock_map);
        __release(RCU_SCHED);
        preempt_enable();
@@ -1031,7 +1043,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define RCU_INIT_POINTER(p, v) \
        do { \
                rcu_dereference_sparse(p, __rcu); \
-               p = RCU_INITIALIZER(v); \
+               WRITE_ONCE(p, RCU_INITIALIZER(v)); \
        } while (0)
 
 /**
index 3df6c1ec4e25503583cb14656474d6727495a530..ff968b7af3a4b61c9496f5222a7f600a42f5eca6 100644 (file)
@@ -37,6 +37,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
        might_sleep();
 }
 
+static inline unsigned long get_state_synchronize_sched(void)
+{
+       return 0;
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+       might_sleep();
+}
+
 static inline void rcu_barrier_bh(void)
 {
        wait_rcu_gp(call_rcu_bh);
index 456879143f89f9db45d0f79315f728f50a9f9d0c..5abec82f325ecf5e2a0c06c21d566f94f5559721 100644 (file)
@@ -76,6 +76,8 @@ void rcu_barrier_bh(void);
 void rcu_barrier_sched(void);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
+unsigned long get_state_synchronize_sched(void);
+void cond_synchronize_sched(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
index 8715287c3b1f636d21f01acb4ce9220a59764b08..c314989d91585fa028c8b287aca521dd298abf07 100644 (file)
@@ -212,6 +212,9 @@ struct callback_head {
 };
 #define rcu_head callback_head
 
+typedef void (*rcu_callback_t)(struct rcu_head *head);
+typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+
 /* clocksource cycle base type */
 typedef u64 cycle_t;
 
index af09b4fb43d291856708eb78197adb74dd1b4f48..ba1e6eaf4c36e72bdf29d0b683b9c118fb0b38db 100644 (file)
@@ -538,15 +538,6 @@ config RCU_STALL_COMMON
 config CONTEXT_TRACKING
        bool
 
-config RCU_USER_QS
-       bool
-       help
-         This option sets hooks on kernel / userspace boundaries and
-         puts RCU in extended quiescent state when the CPU runs in
-         userspace. It means that when a CPU runs in userspace, it is
-         excluded from the global RCU state machine and thus doesn't
-         try to keep the timer tick on for RCU.
-
 config CONTEXT_TRACKING_FORCE
        bool "Force context tracking"
        depends on CONTEXT_TRACKING
@@ -707,6 +698,7 @@ config RCU_BOOST_DELAY
 config RCU_NOCB_CPU
        bool "Offload RCU callback processing from boot-selected CPUs"
        depends on TREE_RCU || PREEMPT_RCU
+       depends on RCU_EXPERT || NO_HZ_FULL
        default n
        help
          Use this option to reduce OS jitter for aggressive HPC or
index f89d9292eee62540ff1c2f81ed0a658cbd279642..b89f3168411bc1e9f4f96a42fe5732f3e36c1d41 100644 (file)
@@ -107,8 +107,8 @@ static DEFINE_SPINLOCK(release_agent_path_lock);
 struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
 
 #define cgroup_assert_mutex_or_rcu_locked()                            \
-       rcu_lockdep_assert(rcu_read_lock_held() ||                      \
-                          lockdep_is_held(&cgroup_mutex),              \
+       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
+                          !lockdep_is_held(&cgroup_mutex),             \
                           "cgroup_mutex or RCU read lock required");
 
 /*
index 9c9c9fab16cc3610afa76a6c467780482b35b0be..d63b062b62679d46af89d9028154f3e0b9d6e0ab 100644 (file)
@@ -380,14 +380,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-        * not imply sync_sched(), so explicitly call both.
+        * not imply sync_sched(), so wait for both.
         *
         * Do sync before park smpboot threads to take care the rcu boost case.
         */
-#ifdef CONFIG_PREEMPT
-       synchronize_sched();
-#endif
-       synchronize_rcu();
+       if (IS_ENABLED(CONFIG_PREEMPT))
+               synchronize_rcu_mult(call_rcu, call_rcu_sched);
+       else
+               synchronize_rcu();
 
        smpboot_park_threads(cpu);
 
index 4fd07d5b7baf3b0176388b8ce0f06511b56ba2cc..ca368793808e37688f7b0219b54d458fede30765 100644 (file)
@@ -451,9 +451,8 @@ EXPORT_SYMBOL(pid_task);
  */
 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
-       rcu_lockdep_assert(rcu_read_lock_held(),
-                          "find_task_by_pid_ns() needs rcu_read_lock()"
-                          " protection");
+       RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+                        "find_task_by_pid_ns() needs rcu_read_lock() protection");
        return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
 }
 
index 59e32684c23b58714ecb26215856f67866cfc58e..0f2cb55f0ab3b917b33e1df7caa17cb12dc7bbfc 100644 (file)
@@ -635,6 +635,8 @@ static struct rcu_torture_ops sched_ops = {
        .deferred_free  = rcu_sched_torture_deferred_free,
        .sync           = synchronize_sched,
        .exp_sync       = synchronize_sched_expedited,
+       .get_state      = get_state_synchronize_sched,
+       .cond_sync      = cond_synchronize_sched,
        .call           = call_rcu_sched,
        .cb_barrier     = rcu_barrier_sched,
        .fqs            = rcu_sched_force_quiescent_state,
index fb33d35ee0b7c0ecdb6df0cb0562050abcc6707b..d3fcb2ec8536724615cbdd5ce5a891cbcc020934 100644 (file)
@@ -252,14 +252,15 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
 }
 
 /**
- * srcu_readers_active - returns approximate number of readers.
+ * srcu_readers_active - returns true if there are readers, and false
+ *                       otherwise
  * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
  *
  * Note that this is not an atomic primitive, and can therefore suffer
  * severe errors when invoked on an active srcu_struct.  That said, it
  * can be useful as an error check at cleanup time.
  */
-static int srcu_readers_active(struct srcu_struct *sp)
+static bool srcu_readers_active(struct srcu_struct *sp)
 {
        int cpu;
        unsigned long sum = 0;
@@ -414,11 +415,11 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
        struct rcu_head *head = &rcu.head;
        bool done = false;
 
-       rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
-                          !lock_is_held(&rcu_bh_lock_map) &&
-                          !lock_is_held(&rcu_lock_map) &&
-                          !lock_is_held(&rcu_sched_lock_map),
-                          "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
+       RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+                        lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
 
        might_sleep();
        init_completion(&rcu.completion);
index c291bd65d2cbb40a1895d735a9183a1dd441237f..d0471056d0afac18ed1739981aaa415495414791 100644 (file)
@@ -191,10 +191,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 void synchronize_sched(void)
 {
-       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                          !lock_is_held(&rcu_lock_map) &&
-                          !lock_is_held(&rcu_sched_lock_map),
-                          "Illegal synchronize_sched() in RCU read-side critical section");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_sched() in RCU read-side critical section");
        cond_resched();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
index 439112e9d1b3e77298ca82f86fb4f0e225765feb..9f75f25cc5d92667c27d70dd1b1a6091b42fbceb 100644 (file)
@@ -646,12 +646,12 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
         * It is illegal to enter an extended quiescent state while
         * in an RCU read-side critical section.
         */
-       rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
-                          "Illegal idle entry in RCU read-side critical section.");
-       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
-                          "Illegal idle entry in RCU-bh read-side critical section.");
-       rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
-                          "Illegal idle entry in RCU-sched read-side critical section.");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+                        "Illegal idle entry in RCU read-side critical section.");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
+                        "Illegal idle entry in RCU-bh read-side critical section.");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
+                        "Illegal idle entry in RCU-sched read-side critical section.");
 }
 
 /*
@@ -698,7 +698,7 @@ void rcu_idle_enter(void)
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
 /**
  * rcu_user_enter - inform RCU that we are resuming userspace.
  *
@@ -711,7 +711,7 @@ void rcu_user_enter(void)
 {
        rcu_eqs_enter(1);
 }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
 
 /**
  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
@@ -825,7 +825,7 @@ void rcu_idle_exit(void)
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
 /**
  * rcu_user_exit - inform RCU that we are exiting userspace.
  *
@@ -836,7 +836,7 @@ void rcu_user_exit(void)
 {
        rcu_eqs_exit(1);
 }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -975,9 +975,9 @@ bool notrace rcu_is_watching(void)
 {
        bool ret;
 
-       preempt_disable();
+       preempt_disable_notrace();
        ret = __rcu_is_watching();
-       preempt_enable();
+       preempt_enable_notrace();
        return ret;
 }
 EXPORT_SYMBOL_GPL(rcu_is_watching);
@@ -3180,10 +3180,10 @@ static inline int rcu_blocking_is_gp(void)
  */
 void synchronize_sched(void)
 {
-       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                          !lock_is_held(&rcu_lock_map) &&
-                          !lock_is_held(&rcu_sched_lock_map),
-                          "Illegal synchronize_sched() in RCU-sched read-side critical section");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_sched() in RCU-sched read-side critical section");
        if (rcu_blocking_is_gp())
                return;
        if (rcu_gp_is_expedited())
@@ -3207,10 +3207,10 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
  */
 void synchronize_rcu_bh(void)
 {
-       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                          !lock_is_held(&rcu_lock_map) &&
-                          !lock_is_held(&rcu_sched_lock_map),
-                          "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
        if (rcu_blocking_is_gp())
                return;
        if (rcu_gp_is_expedited())
@@ -3272,6 +3272,58 @@ void cond_synchronize_rcu(unsigned long oldstate)
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
+/**
+ * get_state_synchronize_sched - Snapshot current RCU-sched state
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_sched()
+ * to determine whether or not a full grace period has elapsed in the
+ * meantime.
+ */
+unsigned long get_state_synchronize_sched(void)
+{
+       /*
+        * Any prior manipulation of RCU-protected data must happen
+        * before the load from ->gpnum.
+        */
+       smp_mb();  /* ^^^ */
+
+       /*
+        * Make sure this load happens before the purportedly
+        * time-consuming work between get_state_synchronize_sched()
+        * and cond_synchronize_sched().
+        */
+       return smp_load_acquire(&rcu_sched_state.gpnum);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
+
+/**
+ * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
+ *
+ * @oldstate: return value from earlier call to get_state_synchronize_sched()
+ *
+ * If a full RCU-sched grace period has elapsed since the earlier call to
+ * get_state_synchronize_sched(), just return.  Otherwise, invoke
+ * synchronize_sched() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account.  But
+ * counter wrap is harmless.  If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for one additional grace period should be just fine.
+ */
+void cond_synchronize_sched(unsigned long oldstate)
+{
+       unsigned long newstate;
+
+       /*
+        * Ensure that this load happens before any RCU-destructive
+        * actions the caller might carry out after we return.
+        */
+       newstate = smp_load_acquire(&rcu_sched_state.completed);
+       if (ULONG_CMP_GE(oldstate, newstate))
+               synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_sched);
+
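
A sketch of the intended usage pattern, mirroring the existing
get_state_synchronize_rcu()/cond_synchronize_rcu() pair (the update-side
work function here is hypothetical):

    unsigned long gp_state;

    gp_state = get_state_synchronize_sched();
    /* Lengthy update-side work that does not yet need a grace period. */
    do_expensive_update_work();
    /* Wait only if a full RCU-sched grace period has not since elapsed. */
    cond_synchronize_sched(gp_state);
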
 /* Adjust sequence number for start of update-side operation. */
 static void rcu_seq_start(unsigned long *sp)
 {
index 27b714601c6e515030aaf5702a11b28cb8c0cb21..b2bf3963a0aee328d0d3bfc0d9e997b3a5ca106f 100644 (file)
@@ -522,10 +522,10 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
-       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                          !lock_is_held(&rcu_lock_map) &&
-                          !lock_is_held(&rcu_sched_lock_map),
-                          "Illegal synchronize_rcu() in RCU read-side critical section");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_rcu() in RCU read-side critical section");
        if (!rcu_scheduler_active)
                return;
        if (rcu_gp_is_expedited())
@@ -1003,8 +1003,7 @@ static int rcu_boost(struct rcu_node *rnp)
 }
 
 /*
- * Priority-boosting kthread.  One per leaf rcu_node and one for the
- * root rcu_node.
+ * Priority-boosting kthread, one per leaf rcu_node.
  */
 static int rcu_boost_kthread(void *arg)
 {
@@ -1622,12 +1621,10 @@ static int rcu_oom_notify(struct notifier_block *self,
         */
        atomic_set(&oom_callback_count, 1);
 
-       get_online_cpus();
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
                cond_resched_rcu_qs();
        }
-       put_online_cpus();
 
        /* Unconditionally decrement: no need to wake ourselves up. */
        atomic_dec(&oom_callback_count);
index afaecb7a799af235f63afb6877050cf348e4247c..7a0b3bc7c5ed147162bec6a600e0bfbded04257d 100644 (file)
@@ -62,6 +62,55 @@ MODULE_ALIAS("rcupdate");
 
 module_param(rcu_expedited, int, 0);
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
+/**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.  This is useful for debug checks in functions
+ * that required that they be called within an RCU-sched read-side
+ * critical section.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of
+ * view (i.e., that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
+ * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
+ * that are in such a section, considering these as in extended quiescent
+ * state, so such a CPU is effectively never in an RCU read-side critical
+ * section regardless of what RCU primitives it invokes.  This state of
+ * affairs is required --- we need to keep an RCU-free window in idle
+ * where the CPU may possibly enter into low power mode. This way we can
+ * notice an extended quiescent state to other CPUs that started a grace
+ * period. Otherwise we would delay any grace period as long as we run in
+ * the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
+ */
+int rcu_read_lock_sched_held(void)
+{
+       int lockdep_opinion = 0;
+
+       if (!debug_lockdep_rcu_enabled())
+               return 1;
+       if (!rcu_is_watching())
+               return 0;
+       if (!rcu_lockdep_current_cpu_online())
+               return 0;
+       if (debug_locks)
+               lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+       return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
+}
+EXPORT_SYMBOL(rcu_read_lock_sched_held);
+#endif
+
 #ifndef CONFIG_TINY_RCU
 
 static atomic_t rcu_expedited_nesting =
@@ -269,20 +318,37 @@ void wakeme_after_rcu(struct rcu_head *head)
        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
 }
+EXPORT_SYMBOL_GPL(wakeme_after_rcu);
 
-void wait_rcu_gp(call_rcu_func_t crf)
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+                  struct rcu_synchronize *rs_array)
 {
-       struct rcu_synchronize rcu;
+       int i;
 
-       init_rcu_head_on_stack(&rcu.head);
-       init_completion(&rcu.completion);
-       /* Will wake me after RCU finished. */
-       crf(&rcu.head, wakeme_after_rcu);
-       /* Wait for it. */
-       wait_for_completion(&rcu.completion);
-       destroy_rcu_head_on_stack(&rcu.head);
+       /* Initialize and register callbacks for each flavor specified. */
+       for (i = 0; i < n; i++) {
+               if (checktiny &&
+                   (crcu_array[i] == call_rcu ||
+                    crcu_array[i] == call_rcu_bh)) {
+                       might_sleep();
+                       continue;
+               }
+               init_rcu_head_on_stack(&rs_array[i].head);
+               init_completion(&rs_array[i].completion);
+               (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
+       }
+
+       /* Wait for all callbacks to be invoked. */
+       for (i = 0; i < n; i++) {
+               if (checktiny &&
+                   (crcu_array[i] == call_rcu ||
+                    crcu_array[i] == call_rcu_bh))
+                       continue;
+               wait_for_completion(&rs_array[i].completion);
+               destroy_rcu_head_on_stack(&rs_array[i].head);
+       }
 }
-EXPORT_SYMBOL_GPL(wait_rcu_gp);
+EXPORT_SYMBOL_GPL(__wait_rcu_gp);
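
For reference, a synchronize_rcu_mult(call_rcu, call_rcu_sched) call such as
the one added to _cpu_down() above expands roughly as follows, with both
arrays living on the caller's stack (a sketch of the _wait_rcu_gp() macro
expansion from include/linux/rcupdate.h, not literal preprocessor output):

    do {
            call_rcu_func_t __crcu_array[] = { call_rcu, call_rcu_sched };
            const int __n = ARRAY_SIZE(__crcu_array);  /* == 2 */
            struct rcu_synchronize __rs_array[__n];

            /* Under Tiny RCU, the call_rcu entry degenerates to might_sleep(). */
            __wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __n,
                          __crcu_array, __rs_array);
    } while (0);
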
 
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
 void init_rcu_head(struct rcu_head *head)
@@ -523,8 +589,8 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
 void synchronize_rcu_tasks(void)
 {
        /* Complain if the scheduler has not started.  */
-       rcu_lockdep_assert(!rcu_scheduler_active,
-                          "synchronize_rcu_tasks called too soon");
+       RCU_LOCKDEP_WARN(!rcu_scheduler_active,
+                        "synchronize_rcu_tasks called too soon");
 
        /* Wait for the grace period. */
        wait_rcu_gp(call_rcu_tasks);
index 78b4bad10081c6b23894ac1d5d7b6900ab32362a..5e73c79fadd001d0bc898824549df6d5c89350b0 100644 (file)
@@ -2200,8 +2200,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
-       rcu_lockdep_assert(rcu_read_lock_sched_held(),
-                          "sched RCU must be held");
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+                        "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -2210,8 +2210,8 @@ static inline int dl_bw_cpus(int i)
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus = 0;
 
-       rcu_lockdep_assert(rcu_read_lock_sched_held(),
-                          "sched RCU must be held");
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+                        "sched RCU must be held");
        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;
 
index 579ce1b929afde343a29fc77e4e7c4997ea18852..4008d9f95dd724de5fd898d6a6ffd34520563f86 100644 (file)
@@ -92,12 +92,10 @@ config NO_HZ_FULL
        depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
        # We need at least one periodic CPU for timekeeping
        depends on SMP
-       # RCU_USER_QS dependency
        depends on HAVE_CONTEXT_TRACKING
        # VIRT_CPU_ACCOUNTING_GEN dependency
        depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
        select NO_HZ_COMMON
-       select RCU_USER_QS
        select RCU_NOCB_CPU
        select VIRT_CPU_ACCOUNTING_GEN
        select IRQ_WORK
index 4c4f06176f748616b180254e94eda6bbed7dde25..cb91c63b4f4a3b25ac6fb27b211d341ba013bc48 100644 (file)
@@ -338,20 +338,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 #include <trace/events/workqueue.h>
 
 #define assert_rcu_or_pool_mutex()                                     \
-       rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
-                          lockdep_is_held(&wq_pool_mutex),             \
-                          "sched RCU or wq_pool_mutex should be held")
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
+                        !lockdep_is_held(&wq_pool_mutex),              \
+                        "sched RCU or wq_pool_mutex should be held")
 
 #define assert_rcu_or_wq_mutex(wq)                                     \
-       rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
-                          lockdep_is_held(&wq->mutex),                 \
-                          "sched RCU or wq->mutex should be held")
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
+                        !lockdep_is_held(&wq->mutex),                  \
+                        "sched RCU or wq->mutex should be held")
 
 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                       \
-       rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
-                          lockdep_is_held(&wq->mutex) ||               \
-                          lockdep_is_held(&wq_pool_mutex),             \
-                          "sched RCU, wq->mutex or wq_pool_mutex should be held")
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
+                        !lockdep_is_held(&wq->mutex) &&                \
+                        !lockdep_is_held(&wq_pool_mutex),              \
+                        "sched RCU, wq->mutex or wq_pool_mutex should be held")
 
 #define for_each_cpu_worker_pool(pool, cpu)                            \
        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
index 8a34205d6922c48cc91ddd496edd3f84418f5248..3e0b662cae09611b5628c474cc58e448c3cefdc7 100644 (file)
@@ -1365,7 +1365,7 @@ config RCU_TRACE
          Say N if you are unsure.
 
 config RCU_EQS_DEBUG
-       bool "Use this when adding any sort of NO_HZ support to your arch"
+       bool "Provide debugging asserts for adding NO_HZ support to an arch"
        depends on DEBUG_KERNEL
        help
          This option provides consistency checks in RCU's handling of
index 90e1edc8dd42fbfba3c31d03214f99d4e3c361b4..976e7117edfbb4040fb93517fca4fb43c2283c5f 100755 (executable)
@@ -5011,6 +5011,7 @@ sub process {
                                     "memory barrier without comment\n" . $herecurr);
                        }
                }
+
 # check for waitqueue_active without a comment.
                if ($line =~ /\bwaitqueue_active\s*\(/) {
                        if (!ctx_has_comment($first_line, $linenr)) {
@@ -5018,6 +5019,24 @@ sub process {
                                     "waitqueue_active without comment\n" . $herecurr);
                        }
                }
+
+# Check for expedited grace periods that interrupt non-idle non-nohz
+# online CPUs.  These expedited primitives can therefore degrade real-time
+# response
+# if used carelessly, and should be avoided where not absolutely
+# needed.  It is always OK to use synchronize_rcu_expedited() and
+# synchronize_sched_expedited() at boot time (before real-time applications
+# start) and in error situations where real-time response is compromised in
+# any case.  Note that synchronize_srcu_expedited() does -not- interrupt
+# other CPUs, so don't warn on uses of synchronize_srcu_expedited().
+# Of course, nothing comes for free, and srcu_read_lock() and
+# srcu_read_unlock() do contain full memory barriers in payment for
+# synchronize_srcu_expedited()'s non-interruption properties.
+               if ($line =~ /\b(synchronize_rcu_expedited|synchronize_sched_expedited)\(/) {
+                       WARN("EXPEDITED_RCU_GRACE_PERIOD",
+                            "expedited RCU grace periods should be avoided where they can degrade real-time response\n" . $herecurr);
+
+               }
+
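
As a sketch, code like the following (driver and function names hypothetical)
would now draw the new warning, which prints roughly "WARNING: expedited RCU
grace periods should be avoided where they can degrade real-time response":

    static void my_driver_teardown(void)
    {
            /* checkpatch now flags this expedited grace period. */
            synchronize_rcu_expedited();
    }
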
 # check of hardware specific defines
                if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
                        CHK("ARCH_DEFINES",
index 188c1d26393b56bfd13814e7834119a440410114..73455089feef3a11af0d88880fa6423070c50ea5 100644 (file)
@@ -400,9 +400,9 @@ static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
 {
        bool match = false;
 
-       rcu_lockdep_assert(rcu_read_lock_held() ||
-                          lockdep_is_held(&devcgroup_mutex),
-                          "device_cgroup:verify_new_ex called without proper synchronization");
+       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+                        !lockdep_is_held(&devcgroup_mutex),
+                        "device_cgroup:verify_new_ex called without proper synchronization");
 
        if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                if (behavior == DEVCG_DEFAULT_ALLOW) {