2 * Generic OPP Interface
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/regulator/consumer.h>
28 * The root of the list of all devices. All device_opp structures branch off
29 * from here, with each device_opp containing the list of opp it supports in
30 * various states of availability.
32 static LIST_HEAD(dev_opp_list);
33 /* Lock to allow exclusive modification to the device and opp lists */
34 DEFINE_MUTEX(dev_opp_list_lock);
36 #define opp_rcu_lockdep_assert() \
38 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
39 !lockdep_is_held(&dev_opp_list_lock), \
40 "Missing rcu_read_lock() or " \
41 "dev_opp_list_lock protection"); \
/*
 * _find_list_dev() - find the device_list_opp entry for @dev inside @dev_opp.
 * Walks dev_opp->dev_list comparing stored device pointers against @dev.
 * NOTE(review): extract is truncated — the "return list_dev;" on match, the
 * final "return NULL;" and the closing brace are not visible here.
 */
44 static struct device_list_opp *_find_list_dev(const struct device *dev,
45 struct device_opp *dev_opp)
47 struct device_list_opp *list_dev;
49 list_for_each_entry(list_dev, &dev_opp->dev_list, node)
50 if (list_dev->dev == dev)
/*
 * _managed_opp() - find an already-managed OPP table for device node @np.
 * Only returns the table when it is marked shared (DT "opp-shared"),
 * otherwise each device gets its own table even with the same np.
 * NOTE(review): extract is truncated — the fallback return for "no match"
 * and the closing braces are not visible here.
 */
56 static struct device_opp *_managed_opp(const struct device_node *np)
58 struct device_opp *dev_opp;
60 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
61 if (dev_opp->np == np) {
63 * Multiple devices can point to the same OPP table and
64 * so will have same node-pointer, np.
66 * But the OPPs will be considered as shared only if the
67 * OPP table contains a "opp-shared" property.
69 return dev_opp->shared_opp ? dev_opp : NULL;
77 * _find_device_opp() - find device_opp struct using device pointer
78 * @dev: device pointer used to lookup device OPPs
80 * Search list of device OPPs for one containing matching device. Does a RCU
81 * reader operation to grab the pointer needed.
83 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
84 * -EINVAL based on type of error.
86 * Locking: For readers, this function must be called under rcu_read_lock().
87 * device_opp is a RCU protected pointer, which means that device_opp is valid
88 * as long as we are under RCU lock.
90 * For Writers, this function must be called with dev_opp_list_lock held.
92 struct device_opp *_find_device_opp(struct device *dev)
94 struct device_opp *dev_opp;
/* Warn unless caller holds rcu_read_lock() or dev_opp_list_lock. */
96 opp_rcu_lockdep_assert();
98 if (IS_ERR_OR_NULL(dev)) {
99 pr_err("%s: Invalid parameters\n", __func__);
100 return ERR_PTR(-EINVAL);
/* NOTE(review): truncated — the "return dev_opp;" on a successful
 * _find_list_dev() match is missing from this extract. */
103 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
104 if (_find_list_dev(dev, dev_opp))
107 return ERR_PTR(-ENODEV);
111 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
112 * @opp: opp for which voltage has to be returned for
114 * Return: voltage in micro volt corresponding to the opp, else
117 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
118 * protected pointer. This means that opp which could have been fetched by
119 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
120 * under RCU lock. The pointer returned by the opp_find_freq family must be
121 * used in the same section as the usage of this function with the pointer
122 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
125 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
127 struct dev_pm_opp *tmp_opp;
130 opp_rcu_lockdep_assert();
132 tmp_opp = rcu_dereference(opp);
/* NOTE(review): truncated — the else-branch returning tmp_opp->u_volt
 * (and the 0 return on error) is missing from this extract. */
133 if (IS_ERR_OR_NULL(tmp_opp))
134 pr_err("%s: Invalid parameters\n", __func__);
140 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
143 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
144 * @opp: opp for which frequency has to be returned for
146 * Return: frequency in hertz corresponding to the opp, else
149 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
150 * protected pointer. This means that opp which could have been fetched by
151 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
152 * under RCU lock. The pointer returned by the opp_find_freq family must be
153 * used in the same section as the usage of this function with the pointer
154 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
157 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
159 struct dev_pm_opp *tmp_opp;
162 opp_rcu_lockdep_assert();
164 tmp_opp = rcu_dereference(opp);
/* NOTE(review): truncated — the branch returning tmp_opp->rate for a
 * valid, available OPP is missing from this extract. */
165 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
166 pr_err("%s: Invalid parameters\n", __func__);
172 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
175 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
176 * @opp: opp for which turbo mode is being verified
178 * Turbo OPPs are not for normal use, and can be enabled (under certain
179 * conditions) for short duration of times to finish high throughput work
180 * quickly. Running on them for longer times may overheat the chip.
182 * Return: true if opp is turbo opp, else false.
184 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
185 * protected pointer. This means that opp which could have been fetched by
186 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
187 * under RCU lock. The pointer returned by the opp_find_freq family must be
188 * used in the same section as the usage of this function with the pointer
189 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
192 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
194 struct dev_pm_opp *tmp_opp;
196 opp_rcu_lockdep_assert();
198 tmp_opp = rcu_dereference(opp);
/* Invalid or unavailable OPPs can never be turbo. */
199 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
200 pr_err("%s: Invalid parameters\n", __func__);
/* NOTE(review): truncated — the "return false;" of this error branch is
 * missing from this extract. */
204 return tmp_opp->turbo;
206 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
209 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
210 * @dev: device for which we do this operation
212 * Return: This function returns the max clock latency in nanoseconds.
214 * Locking: This function takes rcu_read_lock().
216 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
218 struct device_opp *dev_opp;
219 unsigned long clock_latency_ns;
/* NOTE(review): truncated — the rcu_read_lock()/unlock() pair and the
 * IS_ERR(dev_opp) test selecting between the two assignments below are
 * missing from this extract. */
223 dev_opp = _find_device_opp(dev);
225 clock_latency_ns = 0;
227 clock_latency_ns = dev_opp->clock_latency_ns_max;
230 return clock_latency_ns;
232 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
235 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
236 * @dev: device for which we do this operation
238 * Return: This function returns the max voltage latency in nanoseconds.
240 * Locking: This function takes rcu_read_lock().
242 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
244 struct device_opp *dev_opp;
245 struct dev_pm_opp *opp;
246 struct regulator *reg;
247 unsigned long latency_ns = 0;
248 unsigned long min_uV = ~0, max_uV = 0;
253 dev_opp = _find_device_opp(dev);
/* No OPP table for this device: report zero latency. */
254 if (IS_ERR(dev_opp)) {
259 reg = dev_opp->regulator;
261 /* Regulator may not be required for device */
/* NOTE(review): truncated — the IS_ERR(reg) check guarding this error
 * print is missing from this extract. */
263 dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
/* Compute the widest voltage span across all (available) OPPs. */
269 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
273 if (opp->u_volt_min < min_uV)
274 min_uV = opp->u_volt_min;
275 if (opp->u_volt_max > max_uV)
276 max_uV = opp->u_volt_max;
282 * The caller needs to ensure that dev_opp (and hence the regulator)
283 * isn't freed, while we are executing this routine.
/* regulator_set_voltage_time() returns microseconds; convert to ns. */
285 ret = regulator_set_voltage_time(reg, min_uV, max_uV);
287 latency_ns = ret * 1000;
291 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	/* A transition pays both the voltage-scaling and clock-scaling cost. */
	unsigned long volt_latency = dev_pm_opp_get_max_volt_latency(dev);
	unsigned long clock_latency = dev_pm_opp_get_max_clock_latency(dev);

	return volt_latency + clock_latency;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
311 * dev_pm_opp_get_suspend_opp() - Get suspend opp
312 * @dev: device for which we do this operation
314 * Return: This function returns pointer to the suspend opp if it is
315 * defined and available, otherwise it returns NULL.
317 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
318 * protected pointer. The reason for the same is that the opp pointer which is
319 * returned will remain valid for use with opp_get_{voltage, freq} only while
320 * under the locked area. The pointer returned must be used prior to unlocking
321 * with rcu_read_unlock() to maintain the integrity of the pointer.
323 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
325 struct device_opp *dev_opp;
327 opp_rcu_lockdep_assert();
329 dev_opp = _find_device_opp(dev);
/* NOTE(review): truncated — the "return NULL;" for the no-table /
 * no-suspend-opp / unavailable case is missing from this extract. */
330 if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
331 !dev_opp->suspend_opp->available)
334 return dev_opp->suspend_opp;
336 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
339 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
340 * @dev: device for which we do this operation
342 * Return: This function returns the number of available opps if there are any,
343 * else returns 0 if none or the corresponding error value.
345 * Locking: This function takes rcu_read_lock().
347 int dev_pm_opp_get_opp_count(struct device *dev)
349 struct device_opp *dev_opp;
350 struct dev_pm_opp *temp_opp;
/* NOTE(review): truncated — the "int count = 0;" declaration, the
 * rcu_read_lock()/unlock() pair, and the final "return count;" are
 * missing from this extract. */
355 dev_opp = _find_device_opp(dev);
356 if (IS_ERR(dev_opp)) {
357 count = PTR_ERR(dev_opp);
358 dev_err(dev, "%s: device OPP not found (%d)\n",
/* Count only the OPPs currently marked available. */
363 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
364 if (temp_opp->available)
372 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
375 * dev_pm_opp_find_freq_exact() - search for an exact frequency
376 * @dev: device for which we do this operation
377 * @freq: frequency to search for
378 * @available: true/false - match for available opp
380 * Return: Searches for exact match in the opp list and returns pointer to the
381 * matching opp if found, else returns ERR_PTR in case of error and should
382 * be handled using IS_ERR. Error return values can be:
383 * EINVAL: for bad pointer
384 * ERANGE: no match found for search
385 * ENODEV: if device not found in list of registered devices
387 * Note: available is a modifier for the search. if available=true, then the
388 * match is for exact matching frequency and is available in the stored OPP
389 * table. if false, the match is for exact frequency which is not available.
391 * This provides a mechanism to enable an opp which is not available currently
392 * or the opposite as well.
394 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
395 * protected pointer. The reason for the same is that the opp pointer which is
396 * returned will remain valid for use with opp_get_{voltage, freq} only while
397 * under the locked area. The pointer returned must be used prior to unlocking
398 * with rcu_read_unlock() to maintain the integrity of the pointer.
400 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
404 struct device_opp *dev_opp;
405 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
407 opp_rcu_lockdep_assert();
409 dev_opp = _find_device_opp(dev);
410 if (IS_ERR(dev_opp)) {
411 int r = PTR_ERR(dev_opp);
/* Propagate -ENODEV/-EINVAL from the table lookup to the caller. */
412 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
416 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
417 if (temp_opp->available == available &&
418 temp_opp->rate == freq) {
/* NOTE(review): truncated — the "opp = temp_opp; break;" on match and
 * the final "return opp;" are missing from this extract. */
426 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
429 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
430 * @dev: device for which we do this operation
431 * @freq: Start frequency
433 * Search for the matching ceil *available* OPP from a starting freq
436 * Return: matching *opp and refreshes *freq accordingly, else returns
437 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
439 * EINVAL: for bad pointer
440 * ERANGE: no match found for search
441 * ENODEV: if device not found in list of registered devices
443 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
444 * protected pointer. The reason for the same is that the opp pointer which is
445 * returned will remain valid for use with opp_get_{voltage, freq} only while
446 * under the locked area. The pointer returned must be used prior to unlocking
447 * with rcu_read_unlock() to maintain the integrity of the pointer.
449 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
452 struct device_opp *dev_opp;
453 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
455 opp_rcu_lockdep_assert();
/* NOTE(review): truncated — the "if (!dev || !freq)" guard preceding
 * this error print is missing from this extract. */
458 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
459 return ERR_PTR(-EINVAL);
462 dev_opp = _find_device_opp(dev);
464 return ERR_CAST(dev_opp);
/* First available OPP at or above *freq wins (list is sorted ascending). */
466 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
467 if (temp_opp->available && temp_opp->rate >= *freq) {
/* NOTE(review): truncated — storing temp_opp, refreshing *freq, the
 * break and the final return are missing from this extract. */
476 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
479 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
480 * @dev: device for which we do this operation
481 * @freq: Start frequency
483 * Search for the matching floor *available* OPP from a starting freq
486 * Return: matching *opp and refreshes *freq accordingly, else returns
487 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
489 * EINVAL: for bad pointer
490 * ERANGE: no match found for search
491 * ENODEV: if device not found in list of registered devices
493 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
494 * protected pointer. The reason for the same is that the opp pointer which is
495 * returned will remain valid for use with opp_get_{voltage, freq} only while
496 * under the locked area. The pointer returned must be used prior to unlocking
497 * with rcu_read_unlock() to maintain the integrity of the pointer.
499 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
502 struct device_opp *dev_opp;
503 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
505 opp_rcu_lockdep_assert();
/* NOTE(review): truncated — the "if (!dev || !freq)" guard preceding
 * this error print is missing from this extract. */
508 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
509 return ERR_PTR(-EINVAL);
512 dev_opp = _find_device_opp(dev);
514 return ERR_CAST(dev_opp);
/* Track the highest available OPP not exceeding *freq. */
516 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
517 if (temp_opp->available) {
518 /* go to the next node, before choosing prev */
519 if (temp_opp->rate > *freq)
/* NOTE(review): truncated — the break, candidate bookkeeping, *freq
 * refresh and final return are missing from this extract. */
530 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
533 * The caller needs to ensure that device_opp (and hence the clk) isn't freed,
534 * while clk returned here is used.
/*
 * _get_opp_clk() - fetch the clk handle cached in the device's OPP table.
 * Returns the clk pointer, or an ERR_PTR when the table or clock is absent.
 */
536 static struct clk *_get_opp_clk(struct device *dev)
538 struct device_opp *dev_opp;
/* NOTE(review): truncated — the "struct clk *clk;" declaration, the
 * rcu_read_lock()/unlock() pair and the final return are missing. */
543 dev_opp = _find_device_opp(dev);
544 if (IS_ERR(dev_opp)) {
545 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
546 clk = ERR_CAST(dev_opp);
552 dev_err(dev, "%s: No clock available for the device\n",
/*
 * _set_opp_voltage() - program the regulator to the OPP's voltage triplet.
 * @u_volt_min/@u_volt/@u_volt_max give the acceptable range plus the target.
 * Silently succeeds when the device has no regulator (dev_dbg only).
 */
560 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
561 unsigned long u_volt, unsigned long u_volt_min,
562 unsigned long u_volt_max)
566 /* Regulator not available for device */
/* NOTE(review): truncated — the IS_ERR(reg) guard and "return 0;" of
 * this no-regulator branch are missing from this extract. */
568 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
573 dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
576 ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
579 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
580 __func__, u_volt_min, u_volt, u_volt_max, ret);
586 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
587 * @dev: device for which we do this operation
588 * @target_freq: frequency to achieve
590 * This configures the power-supplies and clock source to the levels specified
591 * by the OPP corresponding to the target_freq.
593 * Locking: This function takes rcu_read_lock().
595 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
597 struct device_opp *dev_opp;
598 struct dev_pm_opp *old_opp, *opp;
599 struct regulator *reg;
601 unsigned long freq, old_freq;
602 unsigned long u_volt, u_volt_min, u_volt_max;
603 unsigned long ou_volt, ou_volt_min, ou_volt_max;
606 if (unlikely(!target_freq)) {
607 dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
612 clk = _get_opp_clk(dev);
/* Let the clock framework pick the rate it can actually deliver. */
616 freq = clk_round_rate(clk, target_freq);
620 old_freq = clk_get_rate(clk);
622 /* Return early if nothing to do */
623 if (old_freq == freq) {
624 dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
631 dev_opp = _find_device_opp(dev);
632 if (IS_ERR(dev_opp)) {
633 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
635 return PTR_ERR(dev_opp);
/* Snapshot the current OPP's voltages so we can restore on failure. */
638 old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
639 if (!IS_ERR(old_opp)) {
640 ou_volt = old_opp->u_volt;
641 ou_volt_min = old_opp->u_volt_min;
642 ou_volt_max = old_opp->u_volt_max;
644 dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
645 __func__, old_freq, PTR_ERR(old_opp));
648 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
651 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
652 __func__, freq, ret);
657 u_volt = opp->u_volt;
658 u_volt_min = opp->u_volt_min;
659 u_volt_max = opp->u_volt_max;
661 reg = dev_opp->regulator;
665 /* Scaling up? Scale voltage before frequency */
666 if (freq > old_freq) {
667 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
670 goto restore_voltage;
673 /* Change frequency */
675 dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
676 __func__, old_freq, freq);
678 ret = clk_set_rate(clk, freq);
680 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
682 goto restore_voltage;
685 /* Scaling down? Scale voltage after frequency */
686 if (freq < old_freq) {
687 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
/* NOTE(review): truncated — the restore_freq:/restore_voltage: labels,
 * rcu unlocks and several returns are missing from this extract; the
 * lines below are the error-unwind path that re-applies the old rate
 * and old voltages. */
696 if (clk_set_rate(clk, old_freq))
697 dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
700 /* This shouldn't harm even if the voltages weren't updated earlier */
701 if (!IS_ERR(old_opp))
702 _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
706 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
708 /* List-dev Helpers */
/*
 * _kfree_list_dev_rcu() - RCU callback freeing a device_list_opp.
 * NOTE(review): calling kfree_rcu() from inside an RCU callback on the
 * same rcu_head looks unusual — presumably intentional here, but worth
 * confirming against the upstream history.
 */
709 static void _kfree_list_dev_rcu(struct rcu_head *head)
711 struct device_list_opp *list_dev;
713 list_dev = container_of(head, struct device_list_opp, rcu_head);
714 kfree_rcu(list_dev, rcu_head);
/*
 * _remove_list_dev() - unlink @list_dev from @dev_opp and free it after a
 * SRCU grace period. Also tears down its debugfs entries first.
 */
717 static void _remove_list_dev(struct device_list_opp *list_dev,
718 struct device_opp *dev_opp)
720 opp_debug_unregister(list_dev, dev_opp);
721 list_del(&list_dev->node);
/* Defer the free until existing SRCU readers are done with the entry. */
722 call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
723 _kfree_list_dev_rcu);
/*
 * _add_list_dev() - allocate a device_list_opp for @dev and link it into
 * @dev_opp's dev_list; also creates its debugfs entries.
 * NOTE(review): truncated — the NULL-check after kzalloc, the
 * "list_dev->dev = dev;" initialization and the return statements are
 * missing from this extract.
 */
726 struct device_list_opp *_add_list_dev(const struct device *dev,
727 struct device_opp *dev_opp)
729 struct device_list_opp *list_dev;
732 list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
736 /* Initialize list-dev */
738 list_add_rcu(&list_dev->node, &dev_opp->dev_list);
740 /* Create debugfs entries for the dev_opp */
741 ret = opp_debug_register(list_dev, dev_opp);
742 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
750 * _add_device_opp() - Find device OPP table or allocate a new one
751 * @dev: device for which we do this operation
753 * It tries to find an existing table first, if it couldn't find one, it
754 * allocates a new OPP table and returns that.
756 * Return: valid device_opp pointer if success, else NULL.
758 static struct device_opp *_add_device_opp(struct device *dev)
760 struct device_opp *dev_opp;
761 struct device_list_opp *list_dev;
762 struct device_node *np;
765 /* Check for existing list for 'dev' first */
766 dev_opp = _find_device_opp(dev);
767 if (!IS_ERR(dev_opp))
771 * Allocate a new device OPP table. In the infrequent case where a new
772 * device is needed to be added, we pay this penalty.
774 dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
/* NOTE(review): truncated — NULL checks after kzalloc/_add_list_dev and
 * the corresponding cleanup/return paths are missing from this extract. */
778 INIT_LIST_HEAD(&dev_opp->dev_list);
780 list_dev = _add_list_dev(dev, dev_opp);
787 * Only required for backward compatibility with v1 bindings, but isn't
788 * harmful for other cases. And so we do it unconditionally.
790 np = of_node_get(dev->of_node);
/* Pick up legacy v1 DT properties if the device has an OF node. */
794 if (!of_property_read_u32(np, "clock-latency", &val))
795 dev_opp->clock_latency_ns_max = val;
796 of_property_read_u32(np, "voltage-tolerance",
797 &dev_opp->voltage_tolerance_v1);
801 /* Set regulator to a non-NULL error value */
802 dev_opp->regulator = ERR_PTR(-ENXIO);
804 /* Find clk for the device */
805 dev_opp->clk = clk_get(dev, NULL);
806 if (IS_ERR(dev_opp->clk)) {
807 ret = PTR_ERR(dev_opp->clk);
/* -EPROBE_DEFER is expected during boot ordering; stay quiet then. */
808 if (ret != -EPROBE_DEFER)
809 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
813 srcu_init_notifier_head(&dev_opp->srcu_head);
814 INIT_LIST_HEAD(&dev_opp->opp_list);
816 /* Secure the device list modification */
817 list_add_rcu(&dev_opp->node, &dev_opp_list);
822 * _kfree_device_rcu() - Free device_opp RCU handler
825 static void _kfree_device_rcu(struct rcu_head *head)
827 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
829 kfree_rcu(device_opp, rcu_head);
833 * _remove_device_opp() - Removes a device OPP table
834 * @dev_opp: device OPP table to be removed.
836 * Removes/frees device OPP table it it doesn't contain any OPPs.
838 static void _remove_device_opp(struct device_opp *dev_opp)
840 struct device_list_opp *list_dev;
/* Still has OPPs: table stays alive. */
842 if (!list_empty(&dev_opp->opp_list))
/* NOTE(review): truncated — the early returns for the supported_hw /
 * prop_name / regulator "still in use" checks below are missing from
 * this extract, as are the regulator_put()-style release calls. */
845 if (dev_opp->supported_hw)
848 if (dev_opp->prop_name)
851 if (!IS_ERR(dev_opp->regulator))
855 if (!IS_ERR(dev_opp->clk))
856 clk_put(dev_opp->clk);
858 list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
861 _remove_list_dev(list_dev, dev_opp);
863 /* dev_list must be empty now */
864 WARN_ON(!list_empty(&dev_opp->dev_list));
866 list_del_rcu(&dev_opp->node);
/* Free the table itself once SRCU readers have drained. */
867 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
872 * _kfree_opp_rcu() - Free OPP RCU handler
875 static void _kfree_opp_rcu(struct rcu_head *head)
877 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
879 kfree_rcu(opp, rcu_head);
883 * _opp_remove() - Remove an OPP from a table definition
884 * @dev_opp: points back to the device_opp struct this opp belongs to
885 * @opp: pointer to the OPP to remove
886 * @notify: OPP_EVENT_REMOVE notification should be sent or not
888 * This function removes an opp definition from the opp list.
890 * Locking: The internal device_opp and opp structures are RCU protected.
891 * It is assumed that the caller holds required mutex for an RCU updater
894 static void _opp_remove(struct device_opp *dev_opp,
895 struct dev_pm_opp *opp, bool notify)
898 * Notify the changes in the availability of the operable
899 * frequency/voltage list.
/* NOTE(review): truncated — the "if (notify)" guard for this
 * notification call is missing from this extract. */
902 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
903 opp_debug_remove_one(opp);
904 list_del_rcu(&opp->node);
905 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
/* Drop the table too if this was its last OPP. */
907 _remove_device_opp(dev_opp);
911 * dev_pm_opp_remove() - Remove an OPP from OPP list
912 * @dev: device for which we do this operation
913 * @freq: OPP to remove with matching 'freq'
915 * This function removes an opp from the opp list.
917 * Locking: The internal device_opp and opp structures are RCU protected.
918 * Hence this function internally uses RCU updater strategy with mutex locks
919 * to keep the integrity of the internal data structures. Callers should ensure
920 * that this function is *NOT* called under RCU protection or in contexts where
921 * mutex cannot be locked.
923 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
925 struct dev_pm_opp *opp;
926 struct device_opp *dev_opp;
929 /* Hold our list modification lock here */
930 mutex_lock(&dev_opp_list_lock);
932 dev_opp = _find_device_opp(dev);
/* NOTE(review): truncated — the IS_ERR(dev_opp) bail-out, the "found"
 * flag handling inside the loop, and the goto/unlock label are missing
 * from this extract. */
936 list_for_each_entry(opp, &dev_opp->opp_list, node) {
937 if (opp->rate == freq) {
944 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
949 _opp_remove(dev_opp, opp, true);
951 mutex_unlock(&dev_opp_list_lock);
953 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
/*
 * _allocate_opp() - allocate a zeroed dev_pm_opp and ensure @dev has an OPP
 * table, returning the table through @dev_opp.
 * NOTE(review): truncated — the NULL check after kzalloc, the failure
 * cleanup when _add_device_opp() fails, and the final return are missing
 * from this extract.
 */
955 static struct dev_pm_opp *_allocate_opp(struct device *dev,
956 struct device_opp **dev_opp)
958 struct dev_pm_opp *opp;
960 /* allocate new OPP node */
961 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
965 INIT_LIST_HEAD(&opp->node);
967 *dev_opp = _add_device_opp(dev);
/*
 * _opp_supported_by_regulators() - check that the OPP's voltage range is
 * attainable with the table's regulator; warns and (per the truncated
 * logic) reports false when it is not.
 * NOTE(review): truncated — the IS_ERR(reg) guard and the return
 * statements are missing from this extract.
 */
976 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
977 struct device_opp *dev_opp)
979 struct regulator *reg = dev_opp->regulator;
982 !regulator_is_supported_voltage(reg, opp->u_volt_min,
984 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
985 __func__, opp->u_volt_min, opp->u_volt_max);
/*
 * _opp_add() - insert @new_opp into @dev_opp's frequency-sorted opp_list.
 * Rejects duplicates: returns 0 (ignore) for an exact duplicate that is
 * available, -EEXIST otherwise (see the ternary below).
 */
992 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
993 struct device_opp *dev_opp)
995 struct dev_pm_opp *opp;
996 struct list_head *head = &dev_opp->opp_list;
1000 * Insert new OPP in order of increasing frequency and discard if
1003 * Need to use &dev_opp->opp_list in the condition part of the 'for'
1004 * loop, don't replace it with head otherwise it will become an infinite
1007 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
1008 if (new_opp->rate > opp->rate) {
/* NOTE(review): truncated — advancing "head" and the loop "continue"
 * for the greater-rate case, plus the "break" after the less-than
 * check, are missing from this extract. */
1013 if (new_opp->rate < opp->rate)
1016 /* Duplicate OPPs */
1017 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1018 __func__, opp->rate, opp->u_volt, opp->available,
1019 new_opp->rate, new_opp->u_volt, new_opp->available);
1021 return opp->available && new_opp->u_volt == opp->u_volt ?
1025 new_opp->dev_opp = dev_opp;
1026 list_add_rcu(&new_opp->node, head);
1028 ret = opp_debug_create_one(new_opp, dev_opp);
1030 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
/* An unsupported voltage range disables the OPP rather than failing. */
1033 if (!_opp_supported_by_regulators(new_opp, dev_opp)) {
1034 new_opp->available = false;
1035 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1036 __func__, new_opp->rate);
1043 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1044 * @dev: device for which we do this operation
1045 * @freq: Frequency in Hz for this OPP
1046 * @u_volt: Voltage in uVolts for this OPP
1047 * @dynamic: Dynamically added OPPs.
1049 * This function adds an opp definition to the opp list and returns status.
1050 * The opp is made available by default and it can be controlled using
1051 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1053 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1054 * and freed by dev_pm_opp_of_remove_table.
1056 * Locking: The internal device_opp and opp structures are RCU protected.
1057 * Hence this function internally uses RCU updater strategy with mutex locks
1058 * to keep the integrity of the internal data structures. Callers should ensure
1059 * that this function is *NOT* called under RCU protection or in contexts where
1060 * mutex cannot be locked.
1064 * Duplicate OPPs (both freq and volt are same) and opp->available
1065 * -EEXIST Freq are same and volt are different OR
1066 * Duplicate OPPs (both freq and volt are same) and !opp->available
1067 * -ENOMEM Memory allocation failure
1069 static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1072 struct device_opp *dev_opp;
1073 struct dev_pm_opp *new_opp;
1077 /* Hold our list modification lock here */
1078 mutex_lock(&dev_opp_list_lock);
1080 new_opp = _allocate_opp(dev, &dev_opp);
/* NOTE(review): truncated — the allocation-failure branch returning
 * -ENOMEM is missing from this extract. */
1086 /* populate the opp table */
1087 new_opp->rate = freq;
/* Derive the min/max window from the v1 percentage tolerance. */
1088 tol = u_volt * dev_opp->voltage_tolerance_v1 / 100;
1089 new_opp->u_volt = u_volt;
1090 new_opp->u_volt_min = u_volt - tol;
1091 new_opp->u_volt_max = u_volt + tol;
1092 new_opp->available = true;
1093 new_opp->dynamic = dynamic;
1095 ret = _opp_add(dev, new_opp, dev_opp);
1099 mutex_unlock(&dev_opp_list_lock);
1102 * Notify the changes in the availability of the operable
1103 * frequency/voltage list.
1105 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
/* Error unwind: drop the half-built OPP without notifying. */
1109 _opp_remove(dev_opp, new_opp, false);
1111 mutex_unlock(&dev_opp_list_lock);
1115 /* TODO: Support multiple regulators */
/*
 * opp_parse_supplies() - parse opp-microvolt / opp-microamp DT properties
 * for @opp, preferring the "-<prop_name>" suffixed variants when the table
 * has a prop_name set.
 */
1116 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
1117 struct device_opp *dev_opp)
1119 u32 microvolt[3] = {0};
1122 struct property *prop = NULL;
1123 char name[NAME_MAX];
1125 /* Search for "opp-microvolt-<name>" */
1126 if (dev_opp->prop_name) {
1127 snprintf(name, sizeof(name), "opp-microvolt-%s",
1128 dev_opp->prop_name);
1129 prop = of_find_property(opp->np, name, NULL);
/* NOTE(review): truncated — the "if (!prop)" fallback guard before the
 * unsuffixed search below is missing from this extract. */
1133 /* Search for "opp-microvolt" */
1134 sprintf(name, "opp-microvolt");
1135 prop = of_find_property(opp->np, name, NULL);
1137 /* Missing property isn't a problem, but an invalid entry is */
1142 count = of_property_count_u32_elems(opp->np, name);
1144 dev_err(dev, "%s: Invalid %s property (%d)\n",
1145 __func__, name, count);
1149 /* There can be one or three elements here */
1150 if (count != 1 && count != 3) {
1151 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
1152 __func__, name, count);
1156 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
1158 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
1162 opp->u_volt = microvolt[0];
/* One element: target only, min == max == target. Three: min/max given. */
1165 opp->u_volt_min = opp->u_volt;
1166 opp->u_volt_max = opp->u_volt;
1168 opp->u_volt_min = microvolt[1];
1169 opp->u_volt_max = microvolt[2];
1172 /* Search for "opp-microamp-<name>" */
1174 if (dev_opp->prop_name) {
1175 snprintf(name, sizeof(name), "opp-microamp-%s",
1176 dev_opp->prop_name);
1177 prop = of_find_property(opp->np, name, NULL);
1181 /* Search for "opp-microamp" */
1182 sprintf(name, "opp-microamp");
1183 prop = of_find_property(opp->np, name, NULL);
1186 if (prop && !of_property_read_u32(opp->np, name, &val))
1193 * dev_pm_opp_set_supported_hw() - Set supported platforms
1194 * @dev: Device for which supported-hw has to be set.
1195 * @versions: Array of hierarchy of versions to match.
1196 * @count: Number of elements in the array.
1198 * This is required only for the V2 bindings, and it enables a platform to
1199 * specify the hierarchy of versions it supports. OPP layer will then enable
1200 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1203 * Locking: The internal device_opp and opp structures are RCU protected.
1204 * Hence this function internally uses RCU updater strategy with mutex locks
1205 * to keep the integrity of the internal data structures. Callers should ensure
1206 * that this function is *NOT* called under RCU protection or in contexts where
1207 * mutex cannot be locked.
1209 int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1212 struct device_opp *dev_opp;
1215 /* Hold our list modification lock here */
1216 mutex_lock(&dev_opp_list_lock);
1218 dev_opp = _add_device_opp(dev);
/* NOTE(review): truncated — the NULL check on dev_opp returning -ENOMEM
 * is missing from this extract. */
1224 /* Make sure there are no concurrent readers while updating dev_opp */
1225 WARN_ON(!list_empty(&dev_opp->opp_list));
1227 /* Do we already have a version hierarchy associated with dev_opp? */
1228 if (dev_opp->supported_hw) {
1229 dev_err(dev, "%s: Already have supported hardware list\n",
1235 dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
1237 if (!dev_opp->supported_hw) {
1242 dev_opp->supported_hw_count = count;
1243 mutex_unlock(&dev_opp_list_lock);
/* Error unwind: drop the table if we just created it. */
1247 _remove_device_opp(dev_opp);
1249 mutex_unlock(&dev_opp_list_lock);
1253 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1256 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1257 * @dev: Device for which supported-hw has to be put.
1259 * This is required only for the V2 bindings, and is called for a matching
1260 * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
1261 * will not be freed.
1263 * Locking: The internal device_opp and opp structures are RCU protected.
1264 * Hence this function internally uses RCU updater strategy with mutex locks
1265 * to keep the integrity of the internal data structures. Callers should ensure
1266 * that this function is *NOT* called under RCU protection or in contexts where
1267 * mutex cannot be locked.
1269 void dev_pm_opp_put_supported_hw(struct device *dev)
1271 struct device_opp *dev_opp;
1273 /* Hold our list modification lock here */
1274 mutex_lock(&dev_opp_list_lock);
1276 /* Check for existing list for 'dev' first */
1277 dev_opp = _find_device_opp(dev);
1278 if (IS_ERR(dev_opp)) {
1279 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
/* NOTE(review): truncated — the "goto unlock" of this error branch is
 * missing from this extract. */
1283 /* Make sure there are no concurrent readers while updating dev_opp */
1284 WARN_ON(!list_empty(&dev_opp->opp_list));
1286 if (!dev_opp->supported_hw) {
1287 dev_err(dev, "%s: Doesn't have supported hardware list\n",
1292 kfree(dev_opp->supported_hw);
1293 dev_opp->supported_hw = NULL;
1294 dev_opp->supported_hw_count = 0;
1296 /* Try freeing device_opp if this was the last blocking resource */
1297 _remove_device_opp(dev_opp);
1300 mutex_unlock(&dev_opp_list_lock);
1302 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1305 * dev_pm_opp_set_prop_name() - Set prop-extn name
1306 * @dev: Device for which the prop-name has to be set.
1307 * @name: name to postfix to properties.
1309 * This is required only for the V2 bindings, and it enables a platform to
1310 * specify the extn to be used for certain property names. The properties to
1311 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1312 * should postfix the property name with -<name> while looking for them.
1314 * Locking: The internal device_opp and opp structures are RCU protected.
1315 * Hence this function internally uses RCU updater strategy with mutex locks
1316 * to keep the integrity of the internal data structures. Callers should ensure
1317 * that this function is *NOT* called under RCU protection or in contexts where
1318 * mutex cannot be locked.
1320 int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1322 struct device_opp *dev_opp;
1325 /* Hold our list modification lock here */
1326 mutex_lock(&dev_opp_list_lock);
1328 dev_opp = _add_device_opp(dev);
1334 /* Make sure there are no concurrent readers while updating dev_opp */
1335 WARN_ON(!list_empty(&dev_opp->opp_list));
1337 /* Do we already have a prop-name associated with dev_opp? */
1338 if (dev_opp->prop_name) {
1339 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1340 dev_opp->prop_name);
1345 dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
1346 if (!dev_opp->prop_name) {
1351 mutex_unlock(&dev_opp_list_lock);
1355 _remove_device_opp(dev_opp);
1357 mutex_unlock(&dev_opp_list_lock);
1361 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1364 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1365 * @dev: Device for which the prop-name has to be put.
1367 * This is required only for the V2 bindings, and is called for a matching
1368 * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
1369 * will not be freed.
1371 * Locking: The internal device_opp and opp structures are RCU protected.
1372 * Hence this function internally uses RCU updater strategy with mutex locks
1373 * to keep the integrity of the internal data structures. Callers should ensure
1374 * that this function is *NOT* called under RCU protection or in contexts where
1375 * mutex cannot be locked.
1377 void dev_pm_opp_put_prop_name(struct device *dev)
1379 struct device_opp *dev_opp;
1381 /* Hold our list modification lock here */
1382 mutex_lock(&dev_opp_list_lock);
1384 /* Check for existing list for 'dev' first */
1385 dev_opp = _find_device_opp(dev);
1386 if (IS_ERR(dev_opp)) {
1387 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1391 /* Make sure there are no concurrent readers while updating dev_opp */
1392 WARN_ON(!list_empty(&dev_opp->opp_list));
1394 if (!dev_opp->prop_name) {
1395 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1399 kfree(dev_opp->prop_name);
1400 dev_opp->prop_name = NULL;
1402 /* Try freeing device_opp if this was the last blocking resource */
1403 _remove_device_opp(dev_opp);
1406 mutex_unlock(&dev_opp_list_lock);
1408 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1411 * dev_pm_opp_set_regulator() - Set regulator name for the device
1412 * @dev: Device for which regulator name is being set.
1413 * @name: Name of the regulator.
1415 * In order to support OPP switching, OPP layer needs to know the name of the
1416 * device's regulator, as the core would be required to switch voltages as well.
1418 * This must be called before any OPPs are initialized for the device.
1420 * Locking: The internal device_opp and opp structures are RCU protected.
1421 * Hence this function internally uses RCU updater strategy with mutex locks
1422 * to keep the integrity of the internal data structures. Callers should ensure
1423 * that this function is *NOT* called under RCU protection or in contexts where
1424 * mutex cannot be locked.
1426 int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1428 struct device_opp *dev_opp;
1429 struct regulator *reg;
1432 mutex_lock(&dev_opp_list_lock);
1434 dev_opp = _add_device_opp(dev);
1440 /* This should be called before OPPs are initialized */
1441 if (WARN_ON(!list_empty(&dev_opp->opp_list))) {
1446 /* Already have a regulator set */
1447 if (WARN_ON(!IS_ERR(dev_opp->regulator))) {
1451 /* Allocate the regulator */
1452 reg = regulator_get_optional(dev, name);
1455 if (ret != -EPROBE_DEFER)
1456 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1457 __func__, name, ret);
1461 dev_opp->regulator = reg;
1463 mutex_unlock(&dev_opp_list_lock);
1467 _remove_device_opp(dev_opp);
1469 mutex_unlock(&dev_opp_list_lock);
1473 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
1476 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1477 * @dev: Device for which regulator was set.
1479 * Locking: The internal device_opp and opp structures are RCU protected.
1480 * Hence this function internally uses RCU updater strategy with mutex locks
1481 * to keep the integrity of the internal data structures. Callers should ensure
1482 * that this function is *NOT* called under RCU protection or in contexts where
1483 * mutex cannot be locked.
1485 void dev_pm_opp_put_regulator(struct device *dev)
1487 struct device_opp *dev_opp;
1489 mutex_lock(&dev_opp_list_lock);
1491 /* Check for existing list for 'dev' first */
1492 dev_opp = _find_device_opp(dev);
1493 if (IS_ERR(dev_opp)) {
1494 dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
1498 if (IS_ERR(dev_opp->regulator)) {
1499 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1503 /* Make sure there are no concurrent readers while updating dev_opp */
1504 WARN_ON(!list_empty(&dev_opp->opp_list));
1506 regulator_put(dev_opp->regulator);
1507 dev_opp->regulator = ERR_PTR(-ENXIO);
1509 /* Try freeing device_opp if this was the last blocking resource */
1510 _remove_device_opp(dev_opp);
1513 mutex_unlock(&dev_opp_list_lock);
1515 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1517 static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
1518 struct device_node *np)
1520 unsigned int count = dev_opp->supported_hw_count;
1524 if (!dev_opp->supported_hw)
1528 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1531 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1532 __func__, count, ret);
1536 /* Both of these are bitwise masks of the versions */
1537 if (!(version & dev_opp->supported_hw[count]))
1545 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1546 * @dev: device for which we do this operation
1549 * This function adds an opp definition to the opp list and returns status. The
1550 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1551 * removed by dev_pm_opp_remove.
1553 * Locking: The internal device_opp and opp structures are RCU protected.
1554 * Hence this function internally uses RCU updater strategy with mutex locks
1555 * to keep the integrity of the internal data structures. Callers should ensure
1556 * that this function is *NOT* called under RCU protection or in contexts where
1557 * mutex cannot be locked.
1561 * Duplicate OPPs (both freq and volt are same) and opp->available
1562 * -EEXIST Freq are same and volt are different OR
1563 * Duplicate OPPs (both freq and volt are same) and !opp->available
1564 * -ENOMEM Memory allocation failure
1565 * -EINVAL Failed parsing the OPP node
1567 static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1569 struct device_opp *dev_opp;
1570 struct dev_pm_opp *new_opp;
1575 /* Hold our list modification lock here */
1576 mutex_lock(&dev_opp_list_lock);
1578 new_opp = _allocate_opp(dev, &dev_opp);
1584 ret = of_property_read_u64(np, "opp-hz", &rate);
1586 dev_err(dev, "%s: opp-hz not found\n", __func__);
1590 /* Check if the OPP supports hardware's hierarchy of versions or not */
1591 if (!_opp_is_supported(dev, dev_opp, np)) {
1592 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1597 * Rate is defined as an unsigned long in clk API, and so casting
1598 * explicitly to its type. Must be fixed once rate is 64 bit
1599 * guaranteed in clk API.
1601 new_opp->rate = (unsigned long)rate;
1602 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
1605 new_opp->dynamic = false;
1606 new_opp->available = true;
1608 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1609 new_opp->clock_latency_ns = val;
1611 ret = opp_parse_supplies(new_opp, dev, dev_opp);
1615 ret = _opp_add(dev, new_opp, dev_opp);
1619 /* OPP to select on device suspend */
1620 if (of_property_read_bool(np, "opp-suspend")) {
1621 if (dev_opp->suspend_opp) {
1622 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1623 __func__, dev_opp->suspend_opp->rate,
1626 new_opp->suspend = true;
1627 dev_opp->suspend_opp = new_opp;
1631 if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
1632 dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
1634 mutex_unlock(&dev_opp_list_lock);
1636 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
1637 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
1638 new_opp->u_volt_min, new_opp->u_volt_max,
1639 new_opp->clock_latency_ns);
1642 * Notify the changes in the availability of the operable
1643 * frequency/voltage list.
1645 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
1649 _opp_remove(dev_opp, new_opp, false);
1651 mutex_unlock(&dev_opp_list_lock);
1656 * dev_pm_opp_add() - Add an OPP table from a table definitions
1657 * @dev: device for which we do this operation
1658 * @freq: Frequency in Hz for this OPP
1659 * @u_volt: Voltage in uVolts for this OPP
1661 * This function adds an opp definition to the opp list and returns status.
1662 * The opp is made available by default and it can be controlled using
1663 * dev_pm_opp_enable/disable functions.
1665 * Locking: The internal device_opp and opp structures are RCU protected.
1666 * Hence this function internally uses RCU updater strategy with mutex locks
1667 * to keep the integrity of the internal data structures. Callers should ensure
1668 * that this function is *NOT* called under RCU protection or in contexts where
1669 * mutex cannot be locked.
1673 * Duplicate OPPs (both freq and volt are same) and opp->available
1674 * -EEXIST Freq are same and volt are different OR
1675 * Duplicate OPPs (both freq and volt are same) and !opp->available
1676 * -ENOMEM Memory allocation failure
1678 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1680 return _opp_add_v1(dev, freq, u_volt, true);
1682 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1685 * _opp_set_availability() - helper to set the availability of an opp
1686 * @dev: device for which we do this operation
1687 * @freq: OPP frequency to modify availability
1688 * @availability_req: availability status requested for this opp
1690 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1691 * share a common logic which is isolated here.
1693 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1694 * copy operation, returns 0 if no modification was done OR modification was
1697 * Locking: The internal device_opp and opp structures are RCU protected.
1698 * Hence this function internally uses RCU updater strategy with mutex locks to
1699 * keep the integrity of the internal data structures. Callers should ensure
1700 * that this function is *NOT* called under RCU protection or in contexts where
1701 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1703 static int _opp_set_availability(struct device *dev, unsigned long freq,
1704 bool availability_req)
1706 struct device_opp *dev_opp;
1707 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1710 /* keep the node allocated */
1711 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1715 mutex_lock(&dev_opp_list_lock);
1717 /* Find the device_opp */
1718 dev_opp = _find_device_opp(dev);
1719 if (IS_ERR(dev_opp)) {
1720 r = PTR_ERR(dev_opp);
1721 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1725 /* Do we have the frequency? */
1726 list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
1727 if (tmp_opp->rate == freq) {
1737 /* Is update really needed? */
1738 if (opp->available == availability_req)
1740 /* copy the old data over */
1743 /* plug in new node */
1744 new_opp->available = availability_req;
1746 list_replace_rcu(&opp->node, &new_opp->node);
1747 mutex_unlock(&dev_opp_list_lock);
1748 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1750 /* Notify the change of the OPP availability */
1751 if (availability_req)
1752 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
1755 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
1761 mutex_unlock(&dev_opp_list_lock);
1767 * dev_pm_opp_enable() - Enable a specific OPP
1768 * @dev: device for which we do this operation
1769 * @freq: OPP frequency to enable
1771 * Enables a provided opp. If the operation is valid, this returns 0, else the
1772 * corresponding error value. It is meant to be used for users an OPP available
1773 * after being temporarily made unavailable with dev_pm_opp_disable.
1775 * Locking: The internal device_opp and opp structures are RCU protected.
1776 * Hence this function indirectly uses RCU and mutex locks to keep the
1777 * integrity of the internal data structures. Callers should ensure that
1778 * this function is *NOT* called under RCU protection or in contexts where
1779 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1781 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1782 * copy operation, returns 0 if no modification was done OR modification was
1785 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1787 return _opp_set_availability(dev, freq, true);
1789 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1792 * dev_pm_opp_disable() - Disable a specific OPP
1793 * @dev: device for which we do this operation
1794 * @freq: OPP frequency to disable
1796 * Disables a provided opp. If the operation is valid, this returns
1797 * 0, else the corresponding error value. It is meant to be a temporary
1798 * control by users to make this OPP not available until the circumstances are
1799 * right to make it available again (with a call to dev_pm_opp_enable).
1801 * Locking: The internal device_opp and opp structures are RCU protected.
1802 * Hence this function indirectly uses RCU and mutex locks to keep the
1803 * integrity of the internal data structures. Callers should ensure that
1804 * this function is *NOT* called under RCU protection or in contexts where
1805 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1807 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1808 * copy operation, returns 0 if no modification was done OR modification was
1811 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1813 return _opp_set_availability(dev, freq, false);
1815 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1818 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1819 * @dev: device pointer used to lookup device OPPs.
1821 * Return: pointer to notifier head if found, otherwise -ENODEV or
1822 * -EINVAL based on type of error casted as pointer. value must be checked
1823 * with IS_ERR to determine valid pointer or error result.
1825 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
1826 * protected pointer. The reason for the same is that the opp pointer which is
1827 * returned will remain valid for use with opp_get_{voltage, freq} only while
1828 * under the locked area. The pointer returned must be used prior to unlocking
1829 * with rcu_read_unlock() to maintain the integrity of the pointer.
1831 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1833 struct device_opp *dev_opp = _find_device_opp(dev);
1835 if (IS_ERR(dev_opp))
1836 return ERR_CAST(dev_opp); /* matching type */
1838 return &dev_opp->srcu_head;
1840 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1844 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1846 * @dev: device pointer used to lookup device OPPs.
1848 * Free OPPs created using static entries present in DT.
1850 * Locking: The internal device_opp and opp structures are RCU protected.
1851 * Hence this function indirectly uses RCU updater strategy with mutex locks
1852 * to keep the integrity of the internal data structures. Callers should ensure
1853 * that this function is *NOT* called under RCU protection or in contexts where
1854 * mutex cannot be locked.
1856 void dev_pm_opp_of_remove_table(struct device *dev)
1858 struct device_opp *dev_opp;
1859 struct dev_pm_opp *opp, *tmp;
1861 /* Hold our list modification lock here */
1862 mutex_lock(&dev_opp_list_lock);
1864 /* Check for existing list for 'dev' */
1865 dev_opp = _find_device_opp(dev);
1866 if (IS_ERR(dev_opp)) {
1867 int error = PTR_ERR(dev_opp);
1869 if (error != -ENODEV)
1870 WARN(1, "%s: dev_opp: %d\n",
1871 IS_ERR_OR_NULL(dev) ?
1872 "Invalid device" : dev_name(dev),
1877 /* Find if dev_opp manages a single device */
1878 if (list_is_singular(&dev_opp->dev_list)) {
1879 /* Free static OPPs */
1880 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1882 _opp_remove(dev_opp, opp, true);
1885 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
1889 mutex_unlock(&dev_opp_list_lock);
1891 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
1893 /* Returns opp descriptor node for a device, caller must do of_node_put() */
1894 struct device_node *_of_get_opp_desc_node(struct device *dev)
1897 * TODO: Support for multiple OPP tables.
1899 * There should be only ONE phandle present in "operating-points-v2"
1903 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1906 /* Initializes OPP tables based on new bindings */
1907 static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1909 struct device_node *np;
1910 struct device_opp *dev_opp;
1911 int ret = 0, count = 0;
1913 mutex_lock(&dev_opp_list_lock);
1915 dev_opp = _managed_opp(opp_np);
1917 /* OPPs are already managed */
1918 if (!_add_list_dev(dev, dev_opp))
1920 mutex_unlock(&dev_opp_list_lock);
1923 mutex_unlock(&dev_opp_list_lock);
1925 /* We have opp-list node now, iterate over it and add OPPs */
1926 for_each_available_child_of_node(opp_np, np) {
1929 ret = _opp_add_static_v2(dev, np);
1931 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1937 /* There should be one of more OPP defined */
1938 if (WARN_ON(!count))
1941 mutex_lock(&dev_opp_list_lock);
1943 dev_opp = _find_device_opp(dev);
1944 if (WARN_ON(IS_ERR(dev_opp))) {
1945 ret = PTR_ERR(dev_opp);
1946 mutex_unlock(&dev_opp_list_lock);
1950 dev_opp->np = opp_np;
1951 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1953 mutex_unlock(&dev_opp_list_lock);
1958 dev_pm_opp_of_remove_table(dev);
1963 /* Initializes OPP tables based on old-deprecated bindings */
1964 static int _of_add_opp_table_v1(struct device *dev)
1966 const struct property *prop;
1970 prop = of_find_property(dev->of_node, "operating-points", NULL);
1977 * Each OPP is a set of tuples consisting of frequency and
1978 * voltage like <freq-kHz vol-uV>.
1980 nr = prop->length / sizeof(u32);
1982 dev_err(dev, "%s: Invalid OPP list\n", __func__);
1988 unsigned long freq = be32_to_cpup(val++) * 1000;
1989 unsigned long volt = be32_to_cpup(val++);
1991 if (_opp_add_v1(dev, freq, volt, false))
1992 dev_warn(dev, "%s: Failed to add OPP %ld\n",
/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev: device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	int ret;

	/*
	 * OPPs have two version of bindings now. The older one is deprecated,
	 * try for the new binding first.
	 */
	opp_np = _of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);