// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016 Maxime Ripard
*
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "ccu_common.h"
#include "ccu_gate.h"
#include "ccu_reset.h"

static DEFINE_SPINLOCK(ccu_lock);
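
/*
 * ccu_helper_wait_for_lock - poll until a PLL reports lock
 * @common: the clock's common CCU fields
 * @lock: bitmask of the lock bit to poll for
 *
 * Polls either the dedicated lock register or the clock's own register,
 * depending on CCU_FEATURE_LOCK_REG, every 100us, and warns if the PLL
 * has not locked after 70ms.
 */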
void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
{
	void __iomem *addr;
	u32 reg;

	if (!lock)
		return;

	if (common->features & CCU_FEATURE_LOCK_REG)
		addr = common->base + common->lock_reg;
	else
		addr = common->base + common->reg;

	WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}

/*
 * This clock notifier is called when the frequency of a PLL clock is
 * changed. In common PLL designs, changes to the dividers take effect
 * almost immediately, while changes to the multipliers (implemented
 * as dividers in the feedback loop) take a few cycles to work into
 * the feedback loop for the PLL to stabilize.
 *
 * Sometimes when the PLL clock rate is changed, the decrease in the
 * divider is too much for the decrease in the multiplier to catch up.
 * The PLL clock rate will spike, and in some cases, might lock up
 * completely.
 *
 * This notifier callback will gate and then ungate the clock,
 * effectively resetting it, so that it proceeds to work. Care must be
 * taken to reparent consumers to other temporary clocks during the
 * rate change, and this notifier callback must be the first one
 * to be registered.
 */
static int ccu_pll_notifier_cb(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
	int ret = 0;

	if (event != POST_RATE_CHANGE)
		goto out;
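
	/* Gate and then ungate the PLL so it relocks at the new rate. */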
	ccu_gate_helper_disable(pll->common, pll->enable);

	ret = ccu_gate_helper_enable(pll->common, pll->enable);
	if (ret)
		goto out;

	ccu_helper_wait_for_lock(pll->common, pll->lock);

out:
	return notifier_from_errno(ret);
}
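
/*
 * ccu_pll_notifier_register - hook the gate/ungate workaround above into
 * the clk framework's rate-change notifications for a PLL
 * @pll_nb: notifier descriptor with the PLL's common data, enable and
 *	    lock bits filled in by the caller
 */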
int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
{
	pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;

	return clk_notifier_register(pll_nb->common->hw.clk,
				     &pll_nb->clk_nb);
}
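
/*
 * sunxi_ccu_probe - register all clocks and resets described by a CCU
 *
 * Points every clock at the shared register base and spinlock, registers
 * the clk_hw clocks and their OF provider, then registers the reset
 * controller that lives in the same register space.
 */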
int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
		    const struct sunxi_ccu_desc *desc)
{
	struct ccu_reset *reset;
	int i, ret;

	for (i = 0; i < desc->num_ccu_clks; i++) {
		struct ccu_common *cclk = desc->ccu_clks[i];

		if (!cclk)
			continue;

		cclk->base = reg;
		cclk->lock = &ccu_lock;
	}
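
	/* Register every clk_hw listed in the descriptor. */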
	for (i = 0; i < desc->hw_clks->num; i++) {
		struct clk_hw *hw = desc->hw_clks->hws[i];

		if (!hw)
			continue;

		ret = clk_hw_register(NULL, hw);
		if (ret) {
			pr_err("Couldn't register clock %d - %s\n",
			       i, clk_hw_get_name(hw));
			goto err_clk_unreg;
		}
	}
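
	/* Expose the registered clocks to device tree consumers. */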
	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
				     desc->hw_clks);
	if (ret)
		goto err_clk_unreg;
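
	/* The reset controller shares the CCU's register space and spinlock. */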
	reset = kzalloc(sizeof(*reset), GFP_KERNEL);
	if (!reset) {
		ret = -ENOMEM;
		goto err_alloc_reset;
	}

	reset->rcdev.of_node = node;
	reset->rcdev.ops = &ccu_reset_ops;
	reset->rcdev.owner = THIS_MODULE;
	reset->rcdev.nr_resets = desc->num_resets;
	reset->base = reg;
	reset->lock = &ccu_lock;
	reset->reset_map = desc->resets;

	ret = reset_controller_register(&reset->rcdev);
	if (ret)
		goto err_of_clk_unreg;

	return 0;
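
	/* Error handling: unwind in the reverse order of registration. */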
err_of_clk_unreg:
	kfree(reset);
err_alloc_reset:
	of_clk_del_provider(node);
err_clk_unreg:
	while (--i >= 0) {
		struct clk_hw *hw = desc->hw_clks->hws[i];

		if (!hw)
			continue;

		clk_hw_unregister(hw);
	}
	return ret;
}