kernel/jump_label.c
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

void static_key_slow_inc(struct static_key *key)
{
        int v, v1;

        STATIC_KEY_CHECK_USE();

        /*
         * Careful if we get concurrent static_key_slow_inc() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process.  At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         *
         * So give a special meaning to negative key->enabled: it sends
         * static_key_slow_inc() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update().  Note that
         * atomic_inc_unless_negative() checks >= 0, so roll our own.
         */
        for (v = atomic_read(&key->enabled); v > 0; v = v1) {
                v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
                if (likely(v1 == v))
                        return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                atomic_set(&key->enabled, 1);
        } else {
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
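
/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 * a consumer defines a key, guards a hot path with it, and flips it from
 * slow-path code through the helpers above.
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			my_extra_work();	// only reached once enabled
 *	}
 *
 *	// slow path: each enable bumps the reference count and, on the
 *	// 0 -> 1 transition, patches every associated branch site.
 *	static_key_slow_inc(&my_feature_key.key);
 *	...
 *	static_key_slow_dec(&my_feature_key.key);
 */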

static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
         * __static_key_slow_dec() before the first static_key_slow_inc()
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                jump_label_update(key);
        }
        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        STATIC_KEY_CHECK_USE();
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
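
/*
 * Illustrative usage sketch (hypothetical name my_deferred_key, not part
 * of this file): a deferred key batches the expensive disable-side text
 * patching so rapid enable/disable cycles do not thrash the code.
 *
 *	static struct static_key_deferred my_deferred_key;
 *
 *	jump_label_rate_limit(&my_deferred_key, HZ);	  // defer decs ~1s
 *	static_key_slow_inc(&my_deferred_key.key);	  // enable now
 *	static_key_slow_dec_deferred(&my_deferred_key);	  // disable later,
 *							  // via the timeout work
 */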

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
                entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}
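
/*
 * Worked example (illustrative, assuming JUMP_LABEL_NOP_SIZE == 5, the x86
 * value): an entry whose code address is 0x1000 occupies [0x1000, 0x1005)
 * and is reported as conflicting with any [start, end] range touching those
 * bytes; note that @end is treated as inclusive by the comparison above.
 */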

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
        return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
        return (unsigned long)entry->key & 1UL;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}
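
/*
 * Worked example of the XOR above, assuming the encoding in this tree's
 * linux/jump_label.h (JUMP_LABEL_NOP == 0, JUMP_LABEL_JMP == 1; bit 0 of
 * entry->key holds the "branch" flag, bit 0 of key->entries the
 * initial-true flag):
 *
 *	enabled  branch            result
 *	   0     0 (unlikely)      JUMP_LABEL_NOP - fall through, skip block
 *	   0     1 (likely)        JUMP_LABEL_JMP - branch away from the block
 *	   1     0 (unlikely)      JUMP_LABEL_JMP - jump into the block
 *	   1     1 (likely)        JUMP_LABEL_NOP - fall through into the block
 */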

static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
                 * entry->code set to 0 invalidates module init text sections;
                 * kernel_text_address() verifies we are not in core kernel
                 * init code, see jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, jump_label_type(entry));
        }
}

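/*
 * Note on the layout relied on below and in __jump_label_update(): after
 * jump_label_sort_entries() the entries that reference the same key are
 * contiguous, so each key only records a pointer to its first entry and
 * the update loop walks forward until the key changes.
 */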
void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                /*
                 * Set key->entries to iter, but preserve the JUMP_TYPE_* bit.
                 */
                *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        static_key_initialized = true;
        jump_label_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};
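
/*
 * Summary of how the chain above is used: a static key defined in vmlinux
 * or in one module can be referenced by branches compiled into other
 * modules.  Each such module contributes one static_key_mod node hung off
 * key->next, so jump_label_update() can also patch the out-of-module
 * entries via __jump_label_mod_update().
 */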

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = key->next; mod; mod = mod->next) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries);
        }
}

/**
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                /* Only write NOPs for arch_branch_static(). */
                if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module(iter->key, mod)) {
                        /*
                         * Set key->entries to iter, but preserve the
                         * JUMP_TYPE_* bit.
                         */
                        *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module(iter->key, mod))
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/**
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end overlaps with
 * any of the jump label patch addresses. Code that wants to modify kernel
 * text should first verify that it does not overlap with any of the jump
 * label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
        struct module *mod;

        __jump_label_mod_update(key);

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
        preempt_enable();
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */