return fail_clk;
}
+static int clk_core_set_rate_nolock(struct clk_core *core,
+ unsigned long req_rate);
+
/*
* walk down a subtree and set the new rates notifying the rate
* change on the way
else if (core->parent)
best_parent_rate = core->parent->rate;
+ if (core->flags & CLK_SET_RATE_UNGATE) {
+ unsigned long flags;
+
+ clk_core_prepare(core);
+ flags = clk_enable_lock();
+ clk_core_enable(core);
+ clk_enable_unlock(flags);
+ }
+
if (core->new_parent && core->new_parent != core->parent) {
old_parent = __clk_set_parent_before(core, core->new_parent);
trace_clk_set_parent(core, core->new_parent);
core->rate = clk_recalc(core, best_parent_rate);
+ if (core->flags & CLK_SET_RATE_UNGATE) {
+ unsigned long flags;
+
+ flags = clk_enable_lock();
+ clk_core_disable(core);
+ clk_enable_unlock(flags);
+ clk_core_unprepare(core);
+ }
+
if (core->notifier_count && old_rate != core->rate)
__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
/* handle the new child who might not be in core->children yet */
if (core->new_child)
clk_change_rate(core->new_child);
+
+ /* handle a changed clock that needs to readjust its rate */
+ if (core->flags & CLK_KEEP_REQ_RATE && core->req_rate
+ && core->new_rate != old_rate
+ && core->new_rate != core->req_rate)
+ clk_core_set_rate_nolock(core, core->req_rate);
}
static int clk_core_set_rate_nolock(struct clk_core *core,
return 0;
/* bail early if nothing to do */
- if (rate == clk_core_get_rate_nolock(core))
+ if (rate == clk_core_get_rate_nolock(core)) {
+ core->req_rate = req_rate;
return 0;
+ }
if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
return -EBUSY;
return -EBUSY;
}
+ core->req_rate = req_rate;
+
/* change the rates */
clk_change_rate(top);
- core->req_rate = req_rate;
-
return ret;
}
clk_prepare_lock();
if (min != clk->min_rate || max != clk->max_rate) {
+ unsigned long rate = clk->core->req_rate;
+
+ if (!rate)
+ rate = clk->core->rate;
+
clk->min_rate = min;
clk->max_rate = max;
- ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+ ret = clk_core_set_rate_nolock(clk->core, rate);
}
clk_prepare_unlock();
clk_prepare_lock();
+ /* bail early if nothing to do */
+ if (degrees == clk->core->phase)
+ goto out;
+
trace_clk_set_phase(clk->core, degrees);
if (clk->core->ops->set_phase)
if (!ret)
clk->core->phase = degrees;
+out:
clk_prepare_unlock();
return ret;
rate = core->parent->rate;
else
rate = 0;
- core->rate = core->req_rate = rate;
+ core->rate = rate;
/*
* walk the list of orphan clocks and reparent any that are children of
return ret;
}
-struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
- const char *con_id)
+static struct clk *clk_hw_create_clk(struct clk_hw *hw, const char *dev_id,
+ const char *con_id)
{
struct clk *clk;
- /* This is to allow this function to be chained to others */
- if (!hw || IS_ERR(hw))
- return (struct clk *) hw;
-
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk)
return ERR_PTR(-ENOMEM);
return clk;
}
+/*
+ * Create a per-consumer struct clk handle for @hw.
+ *
+ * A NULL or ERR_PTR @hw is returned unchanged, so this function can be
+ * chained onto calls that themselves return a clk_hw or an error.
+ * When @hw's core is still an orphan and @with_orphans is false, the
+ * lookup fails with -EPROBE_DEFER so the consumer can retry via probe
+ * deferral once the parent clock appears; otherwise the actual handle
+ * allocation is delegated to clk_hw_create_clk().
+ */
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+ const char *con_id, bool with_orphans)
+{
+ /* This is to allow this function to be chained to others */
+ if (!hw || IS_ERR(hw))
+ return (struct clk *) hw;
+
+ /* defer consumers of orphan clocks unless explicitly allowed */
+ if (hw->core->orphan && !with_orphans)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return clk_hw_create_clk(hw, dev_id, con_id);
+}
+
void __clk_free_clk(struct clk *clk)
{
clk_prepare_lock();
INIT_HLIST_HEAD(&core->clks);
- hw->clk = __clk_create_clk(hw, NULL, NULL);
+ hw->clk = clk_hw_create_clk(hw, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
goto fail_parent_names_copy;
void __clk_put(struct clk *clk)
{
+ unsigned long rate;
struct module *owner;
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
clk_prepare_lock();
hlist_del(&clk->clks_node);
- if (clk->min_rate > clk->core->req_rate ||
- clk->max_rate < clk->core->req_rate)
- clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+ rate = clk->core->req_rate;
+ if (!rate)
+ rate = clk->core->rate;
+
+ if (clk->min_rate > rate || clk->max_rate < rate)
+ clk_core_set_rate_nolock(clk->core, rate);
owner = clk->core->owner;
kref_put(&clk->core->ref, __clk_release);
EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
- const char *dev_id, const char *con_id)
+ const char *dev_id, const char *con_id,
+ bool with_orphans)
{
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-EPROBE_DEFER);
clk = provider->get(clkspec, provider->data);
if (!IS_ERR(clk)) {
clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
- con_id);
+ con_id, with_orphans);
if (!IS_ERR(clk) && !__clk_get(clk)) {
__clk_free_clk(clk);
*/
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
- return __of_clk_get_from_provider(clkspec, NULL, __func__);
+ return __of_clk_get_from_provider(clkspec, NULL, __func__, false);
+}
+
+/**
+ * of_clk_get_from_provider_with_orphans() - Lookup clock from a clock provider
+ * @clkspec: pointer to a clock specifier data structure
+ *
+ * This function looks up a struct clk from the registered list of clock
+ * providers; its input is a clock specifier data structure as returned
+ * from the of_parse_phandle_with_args() function call.
+ *
+ * The difference from of_clk_get_from_provider() is that this function
+ * will also successfully look up orphan clocks, since in some cases it
+ * may be necessary to access such orphan clocks as well.
+ */
+struct clk *
+of_clk_get_from_provider_with_orphans(struct of_phandle_args *clkspec)
+{
+ return __of_clk_get_from_provider(clkspec, NULL, __func__, true);
}
int of_clk_get_parent_count(struct device_node *np)